1 #include "perf_event_intel_uncore.h"
3 static struct intel_uncore_type *empty_uncore[] = { NULL, };
4 static struct intel_uncore_type **msr_uncores = empty_uncore;
5 static struct intel_uncore_type **pci_uncores = empty_uncore;
6 /* pci bus to socket mapping */
7 static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
9 static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
11 static DEFINE_RAW_SPINLOCK(uncore_box_lock);
13 /* mask of cpus that collect uncore events */
14 static cpumask_t uncore_cpu_mask;
16 /* constraint for the fixed counter */
17 static struct event_constraint constraint_fixed =
18 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
19 static struct event_constraint constraint_empty =
20 EVENT_CONSTRAINT(0, 0, 0);
/* extract the i-th n-bit wide field from x (e.g. the per-field refcounts kept in er->ref) */
#define __BITS_VALUE(x, i, n)	((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
25 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
26 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
27 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
28 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
29 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
30 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
31 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
32 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
33 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
34 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
35 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
36 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
37 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
38 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
39 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
40 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
41 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
42 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
43 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
44 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
45 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
46 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
47 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
48 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
49 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
50 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
51 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
52 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
53 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
54 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
55 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
56 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
57 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
58 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
59 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
60 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
61 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
62 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
63 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
64 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
65 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
66 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
67 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
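/*
 * Each DEFINE_UNCORE_FORMAT_ATTR() above becomes a file in the PMU's sysfs
 * "format" directory (e.g. .../uncore_cbox_0/format/event reporting
 * "config:0-7"), which tools use to encode raw uncore events.  A typical
 * invocation (hypothetical event numbers) would be:
 *
 *	perf stat -a -e uncore_cbox_0/event=0x34,umask=0x01/ -- sleep 1
 */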
69 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
70 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
71 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
72 static void uncore_pmu_event_read(struct perf_event *event);
74 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
76 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
79 static struct intel_uncore_box *
80 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
82 struct intel_uncore_box *box;
84 box = *per_cpu_ptr(pmu->box, cpu);
88 raw_spin_lock(&uncore_box_lock);
89 list_for_each_entry(box, &pmu->box_list, list) {
90 if (box->phys_id == topology_physical_package_id(cpu)) {
91 atomic_inc(&box->refcnt);
92 *per_cpu_ptr(pmu->box, cpu) = box;
96 raw_spin_unlock(&uncore_box_lock);
98 return *per_cpu_ptr(pmu->box, cpu);
101 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
	/*
	 * The perf core schedules events on a per-cpu basis, but uncore
	 * events are collected by one of the cpus inside a physical package.
	 */
107 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}
/*
 * Generic get constraint function for shared match/mask registers.
 */
122 static struct event_constraint *
123 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
125 struct intel_uncore_extra_reg *er;
126 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
127 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
140 er = &box->shared_regs[reg1->idx];
141 raw_spin_lock_irqsave(&er->lock, flags);
142 if (!atomic_read(&er->ref) ||
143 (er->config1 == reg1->config && er->config2 == reg2->config)) {
144 atomic_inc(&er->ref);
145 er->config1 = reg1->config;
146 er->config2 = reg2->config;
149 raw_spin_unlock_irqrestore(&er->lock, flags);
152 if (!uncore_box_is_fake(box))
157 return &constraint_empty;
160 static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
162 struct intel_uncore_extra_reg *er;
163 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * Also takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}
196 /* Sandy Bridge-EP uncore support */
197 static struct intel_uncore_type snbep_uncore_cbox;
198 static struct intel_uncore_type snbep_uncore_pcu;
200 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
202 struct pci_dev *pdev = box->pci_dev;
203 int box_ctl = uncore_pci_box_ctl(box);
206 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
207 config |= SNBEP_PMON_BOX_CTL_FRZ;
208 pci_write_config_dword(pdev, box_ctl, config);
212 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
214 struct pci_dev *pdev = box->pci_dev;
215 int box_ctl = uncore_pci_box_ctl(box);
218 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
219 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
220 pci_write_config_dword(pdev, box_ctl, config);
224 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
226 struct pci_dev *pdev = box->pci_dev;
227 struct hw_perf_event *hwc = &event->hw;
229 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
232 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
234 struct pci_dev *pdev = box->pci_dev;
235 struct hw_perf_event *hwc = &event->hw;
237 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}
252 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
254 struct pci_dev *pdev = box->pci_dev;
256 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
259 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
264 msr = uncore_msr_box_ctl(box);
267 config |= SNBEP_PMON_BOX_CTL_FRZ;
272 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
277 msr = uncore_msr_box_ctl(box);
280 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
285 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
287 struct hw_perf_event *hwc = &event->hw;
288 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
290 if (reg1->idx != EXTRA_REG_NONE)
291 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
293 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
296 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
297 struct perf_event *event)
299 struct hw_perf_event *hwc = &event->hw;
301 wrmsrl(hwc->config_base, hwc->config);
304 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
306 unsigned msr = uncore_msr_box_ctl(box);
309 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
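/*
 * All SNB-EP style boxes follow the same control scheme: init_box resets the
 * box with PMON_BOX_CTL_INT, disable_box freezes every counter in the box by
 * setting the FRZ bit in the box control register, and enable_box clears it.
 */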
312 static struct attribute *snbep_uncore_formats_attr[] = {
313 &format_attr_event.attr,
314 &format_attr_umask.attr,
315 &format_attr_edge.attr,
316 &format_attr_inv.attr,
317 &format_attr_thresh8.attr,
321 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
322 &format_attr_event.attr,
323 &format_attr_umask.attr,
324 &format_attr_edge.attr,
325 &format_attr_inv.attr,
326 &format_attr_thresh5.attr,
330 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
331 &format_attr_event.attr,
332 &format_attr_umask.attr,
333 &format_attr_edge.attr,
334 &format_attr_tid_en.attr,
335 &format_attr_inv.attr,
336 &format_attr_thresh8.attr,
337 &format_attr_filter_tid.attr,
338 &format_attr_filter_nid.attr,
339 &format_attr_filter_state.attr,
340 &format_attr_filter_opc.attr,
344 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
345 &format_attr_event_ext.attr,
346 &format_attr_occ_sel.attr,
347 &format_attr_edge.attr,
348 &format_attr_inv.attr,
349 &format_attr_thresh5.attr,
350 &format_attr_occ_invert.attr,
351 &format_attr_occ_edge.attr,
352 &format_attr_filter_band0.attr,
353 &format_attr_filter_band1.attr,
354 &format_attr_filter_band2.attr,
355 &format_attr_filter_band3.attr,
359 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
360 &format_attr_event_ext.attr,
361 &format_attr_umask.attr,
362 &format_attr_edge.attr,
363 &format_attr_inv.attr,
364 &format_attr_thresh8.attr,
365 &format_attr_match_rds.attr,
366 &format_attr_match_rnid30.attr,
367 &format_attr_match_rnid4.attr,
368 &format_attr_match_dnid.attr,
369 &format_attr_match_mc.attr,
370 &format_attr_match_opc.attr,
371 &format_attr_match_vnw.attr,
372 &format_attr_match0.attr,
373 &format_attr_match1.attr,
374 &format_attr_mask_rds.attr,
375 &format_attr_mask_rnid30.attr,
376 &format_attr_mask_rnid4.attr,
377 &format_attr_mask_dnid.attr,
378 &format_attr_mask_mc.attr,
379 &format_attr_mask_opc.attr,
380 &format_attr_mask_vnw.attr,
381 &format_attr_mask0.attr,
382 &format_attr_mask1.attr,
386 static struct uncore_event_desc snbep_uncore_imc_events[] = {
387 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
388 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
389 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
390 { /* end: all zeroes */ },
393 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
394 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
395 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
396 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
397 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
398 { /* end: all zeroes */ },
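/*
 * The uncore_event_desc tables above export named event aliases through
 * sysfs, so e.g. "cas_count_read" expands to event=0x04,umask=0x03 without
 * the user having to know the raw encoding.
 */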
401 static struct attribute_group snbep_uncore_format_group = {
403 .attrs = snbep_uncore_formats_attr,
406 static struct attribute_group snbep_uncore_ubox_format_group = {
408 .attrs = snbep_uncore_ubox_formats_attr,
411 static struct attribute_group snbep_uncore_cbox_format_group = {
413 .attrs = snbep_uncore_cbox_formats_attr,
416 static struct attribute_group snbep_uncore_pcu_format_group = {
418 .attrs = snbep_uncore_pcu_formats_attr,
421 static struct attribute_group snbep_uncore_qpi_format_group = {
423 .attrs = snbep_uncore_qpi_formats_attr,
426 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
427 .init_box = snbep_uncore_msr_init_box, \
428 .disable_box = snbep_uncore_msr_disable_box, \
429 .enable_box = snbep_uncore_msr_enable_box, \
430 .disable_event = snbep_uncore_msr_disable_event, \
431 .enable_event = snbep_uncore_msr_enable_event, \
432 .read_counter = uncore_msr_read_counter
434 static struct intel_uncore_ops snbep_uncore_msr_ops = {
435 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
438 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
439 .init_box = snbep_uncore_pci_init_box, \
440 .disable_box = snbep_uncore_pci_disable_box, \
441 .enable_box = snbep_uncore_pci_enable_box, \
442 .disable_event = snbep_uncore_pci_disable_event, \
443 .read_counter = snbep_uncore_pci_read_counter
445 static struct intel_uncore_ops snbep_uncore_pci_ops = {
446 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};
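/*
 * Each UNCORE_EVENT_CONSTRAINT(event, mask) below restricts the given event
 * code to the counters set in "mask"; e.g. 0x1 means counter 0 only and 0x3
 * means counters 0-1.
 */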
450 static struct event_constraint snbep_uncore_cbox_constraints[] = {
451 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
452 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
453 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
454 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
455 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
456 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
457 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
458 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
459 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
460 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
461 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
462 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
463 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
464 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
465 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
466 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
467 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
468 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
469 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
470 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
471 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
472 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
473 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
474 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
475 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
476 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
480 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
481 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
482 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
483 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
484 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
485 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
486 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
487 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
488 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
489 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
490 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
494 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
495 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
496 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
497 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
498 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
499 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
500 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
501 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
502 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
503 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
504 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
505 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
506 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
507 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
508 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
509 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
510 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
511 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
512 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
513 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
514 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
515 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
516 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
517 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
518 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
519 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
520 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
521 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
522 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
526 static struct intel_uncore_type snbep_uncore_ubox = {
531 .fixed_ctr_bits = 48,
532 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
533 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
534 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
535 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
536 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
537 .ops = &snbep_uncore_msr_ops,
538 .format_group = &snbep_uncore_ubox_format_group,
541 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
542 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
543 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
544 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
545 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
546 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
547 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
548 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
549 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
553 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
554 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
555 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
556 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
558 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
559 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
560 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
561 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
562 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
563 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
564 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
566 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
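/*
 * The table above maps an event/umask encoding (first two arguments) to the
 * set of Cbox filter fields the event uses, one bit per field in the last
 * argument; snbep_cbox_hw_config() consults it to pick reg1->idx.
 */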
570 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
572 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
573 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
576 if (uncore_box_is_fake(box))
579 for (i = 0; i < 5; i++) {
580 if (reg1->alloc & (0x1 << i))
581 atomic_sub(1 << (i * 6), &er->ref);
586 static struct event_constraint *
587 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
588 u64 (*cbox_filter_mask)(int fields))
590 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
591 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
596 if (reg1->idx == EXTRA_REG_NONE)
599 raw_spin_lock_irqsave(&er->lock, flags);
600 for (i = 0; i < 5; i++) {
601 if (!(reg1->idx & (0x1 << i)))
603 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
606 mask = cbox_filter_mask(0x1 << i);
607 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
608 !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
617 raw_spin_unlock_irqrestore(&er->lock, flags);
621 if (!uncore_box_is_fake(box))
622 reg1->alloc |= alloc;
626 for (; i >= 0; i--) {
627 if (alloc & (0x1 << i))
628 atomic_sub(1 << (i * 6), &er->ref);
630 return &constraint_empty;
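/*
 * The shared filter register keeps a 6-bit reference count per filter field
 * in er->ref (hence __BITS_VALUE(..., i, 6)); a field can only be shared
 * between events that program the same value into it, otherwise the event
 * gets the empty constraint and cannot be scheduled on this box.
 */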
static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}
649 static struct event_constraint *
650 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
652 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
655 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
657 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
658 struct extra_reg *er;
661 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
662 if (er->event != (event->hw.config & er->config_mask))
668 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
669 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
670 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
676 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
677 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
678 .hw_config = snbep_cbox_hw_config,
679 .get_constraint = snbep_cbox_get_constraint,
680 .put_constraint = snbep_cbox_put_constraint,
683 static struct intel_uncore_type snbep_uncore_cbox = {
688 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
689 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
690 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
691 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
692 .msr_offset = SNBEP_CBO_MSR_OFFSET,
693 .num_shared_regs = 1,
694 .constraints = snbep_uncore_cbox_constraints,
695 .ops = &snbep_uncore_cbox_ops,
696 .format_group = &snbep_uncore_cbox_format_group,
699 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
701 struct hw_perf_event *hwc = &event->hw;
702 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
703 u64 config = reg1->config;
	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	/* only touch the event state when the caller asks for it */
	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}
718 static struct event_constraint *
719 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
721 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
722 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
725 u64 mask, config1 = reg1->config;
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
732 mask = 0xffULL << (idx * 8);
733 raw_spin_lock_irqsave(&er->lock, flags);
734 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
735 !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
741 raw_spin_unlock_irqrestore(&er->lock, flags);
745 if (idx != reg1->idx) {
746 config1 = snbep_pcu_alter_er(event, idx, false);
749 return &constraint_empty;
752 if (!uncore_box_is_fake(box)) {
753 if (idx != reg1->idx)
754 snbep_pcu_alter_er(event, idx, true);
760 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
762 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
763 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}
772 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
774 struct hw_perf_event *hwc = &event->hw;
775 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
776 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
778 if (ev_sel >= 0xb && ev_sel <= 0xe) {
779 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
780 reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
786 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
787 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
788 .hw_config = snbep_pcu_hw_config,
789 .get_constraint = snbep_pcu_get_constraint,
790 .put_constraint = snbep_pcu_put_constraint,
793 static struct intel_uncore_type snbep_uncore_pcu = {
798 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
799 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
800 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
801 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
802 .num_shared_regs = 1,
803 .ops = &snbep_uncore_pcu_ops,
804 .format_group = &snbep_uncore_pcu_format_group,
807 static struct intel_uncore_type *snbep_msr_uncores[] = {
815 SNBEP_PCI_QPI_PORT0_FILTER,
816 SNBEP_PCI_QPI_PORT1_FILTER,
819 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
821 struct hw_perf_event *hwc = &event->hw;
822 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
823 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
825 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
827 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
828 reg1->config = event->attr.config1;
829 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
830 reg2->config = event->attr.config2;
835 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
837 struct pci_dev *pdev = box->pci_dev;
838 struct hw_perf_event *hwc = &event->hw;
839 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
840 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
842 if (reg1->idx != EXTRA_REG_NONE) {
843 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
844 struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
845 WARN_ON_ONCE(!filter_pdev);
847 pci_write_config_dword(filter_pdev, reg1->reg,
849 pci_write_config_dword(filter_pdev, reg1->reg + 4,
850 (u32)(reg1->config >> 32));
851 pci_write_config_dword(filter_pdev, reg2->reg,
853 pci_write_config_dword(filter_pdev, reg2->reg + 4,
854 (u32)(reg2->config >> 32));
858 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
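/*
 * Note that the QPI match/mask registers live in a separate "filter" PCI
 * device, looked up via extra_pci_dev[phys_id][SNBEP_PCI_QPI_PORTn_FILTER],
 * rather than in the QPI PMON device itself.
 */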
861 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
862 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
863 .enable_event = snbep_qpi_enable_event,
864 .hw_config = snbep_qpi_hw_config,
865 .get_constraint = uncore_get_constraint,
866 .put_constraint = uncore_put_constraint,
869 #define SNBEP_UNCORE_PCI_COMMON_INIT() \
870 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
871 .event_ctl = SNBEP_PCI_PMON_CTL0, \
872 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
873 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
874 .ops = &snbep_uncore_pci_ops, \
875 .format_group = &snbep_uncore_format_group
877 static struct intel_uncore_type snbep_uncore_ha = {
882 SNBEP_UNCORE_PCI_COMMON_INIT(),
885 static struct intel_uncore_type snbep_uncore_imc = {
890 .fixed_ctr_bits = 48,
891 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
892 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
893 .event_descs = snbep_uncore_imc_events,
894 SNBEP_UNCORE_PCI_COMMON_INIT(),
897 static struct intel_uncore_type snbep_uncore_qpi = {
902 .perf_ctr = SNBEP_PCI_PMON_CTR0,
903 .event_ctl = SNBEP_PCI_PMON_CTL0,
904 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
905 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
906 .num_shared_regs = 1,
907 .ops = &snbep_uncore_qpi_ops,
908 .event_descs = snbep_uncore_qpi_events,
909 .format_group = &snbep_uncore_qpi_format_group,
913 static struct intel_uncore_type snbep_uncore_r2pcie = {
918 .constraints = snbep_uncore_r2pcie_constraints,
919 SNBEP_UNCORE_PCI_COMMON_INIT(),
922 static struct intel_uncore_type snbep_uncore_r3qpi = {
927 .constraints = snbep_uncore_r3qpi_constraints,
928 SNBEP_UNCORE_PCI_COMMON_INIT(),
933 SNBEP_PCI_UNCORE_IMC,
934 SNBEP_PCI_UNCORE_QPI,
935 SNBEP_PCI_UNCORE_R2PCIE,
936 SNBEP_PCI_UNCORE_R3QPI,
939 static struct intel_uncore_type *snbep_pci_uncores[] = {
940 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
941 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
942 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
943 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
944 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
948 static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
950 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
951 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
954 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
955 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
958 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
959 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
962 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
963 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
966 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
967 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
970 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
971 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
974 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
975 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
978 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
979 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
982 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
983 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
986 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
987 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
989 { /* QPI Port 0 filter */
990 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
991 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
992 SNBEP_PCI_QPI_PORT0_FILTER),
994 { /* QPI Port 0 filter */
995 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
996 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
997 SNBEP_PCI_QPI_PORT1_FILTER),
999 { /* end: all zeroes */ }
1002 static struct pci_driver snbep_uncore_pci_driver = {
1003 .name = "snbep_uncore",
1004 .id_table = snbep_uncore_pci_ids,
1008 * build pci bus to socket mapping
1010 static int snbep_pci2phy_map_init(int devid)
1012 struct pci_dev *ubox_dev = NULL;
1018 /* find the UBOX device */
1019 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1022 bus = ubox_dev->bus->number;
1023 /* get the Node ID of the local register */
1024 err = pci_read_config_dword(ubox_dev, 0x40, &config);
1028 /* get the Node ID mapping */
1029 err = pci_read_config_dword(ubox_dev, 0x54, &config);
1033 * every three bits in the Node ID mapping register maps
1034 * to a particular node.
1036 for (i = 0; i < 8; i++) {
1037 if (nodeid == ((config >> (3 * i)) & 0x7)) {
1038 pcibus_to_physid[bus] = i;
1046 * For PCI bus with no UBOX device, find the next bus
1047 * that has UBOX device and use its mapping.
1050 for (bus = 255; bus >= 0; bus--) {
1051 if (pcibus_to_physid[bus] >= 0)
1052 i = pcibus_to_physid[bus];
1054 pcibus_to_physid[bus] = i;
1059 pci_dev_put(ubox_dev);
1061 return err ? pcibios_err_to_errno(err) : 0;
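/*
 * Example: if the node id read from config offset 0x40 is 1 and bits 3-5 of
 * the mapping register at 0x54 also hold the value 1, the loop above assigns
 * physical package id 1 to this bus; the package id is the index of the
 * 3-bit slot whose value matches the local node id.
 */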
1063 /* end of Sandy Bridge-EP uncore support */
1065 /* IvyTown uncore support */
1066 static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
1068 unsigned msr = uncore_msr_box_ctl(box);
1070 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
1073 static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
1075 struct pci_dev *pdev = box->pci_dev;
1077 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
1080 #define IVT_UNCORE_MSR_OPS_COMMON_INIT() \
1081 .init_box = ivt_uncore_msr_init_box, \
1082 .disable_box = snbep_uncore_msr_disable_box, \
1083 .enable_box = snbep_uncore_msr_enable_box, \
1084 .disable_event = snbep_uncore_msr_disable_event, \
1085 .enable_event = snbep_uncore_msr_enable_event, \
1086 .read_counter = uncore_msr_read_counter
1088 static struct intel_uncore_ops ivt_uncore_msr_ops = {
1089 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1092 static struct intel_uncore_ops ivt_uncore_pci_ops = {
1093 .init_box = ivt_uncore_pci_init_box,
1094 .disable_box = snbep_uncore_pci_disable_box,
1095 .enable_box = snbep_uncore_pci_enable_box,
1096 .disable_event = snbep_uncore_pci_disable_event,
1097 .enable_event = snbep_uncore_pci_enable_event,
1098 .read_counter = snbep_uncore_pci_read_counter,
1101 #define IVT_UNCORE_PCI_COMMON_INIT() \
1102 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1103 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1104 .event_mask = IVT_PMON_RAW_EVENT_MASK, \
1105 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1106 .ops = &ivt_uncore_pci_ops, \
1107 .format_group = &ivt_uncore_format_group
1109 static struct attribute *ivt_uncore_formats_attr[] = {
1110 &format_attr_event.attr,
1111 &format_attr_umask.attr,
1112 &format_attr_edge.attr,
1113 &format_attr_inv.attr,
1114 &format_attr_thresh8.attr,
1118 static struct attribute *ivt_uncore_ubox_formats_attr[] = {
1119 &format_attr_event.attr,
1120 &format_attr_umask.attr,
1121 &format_attr_edge.attr,
1122 &format_attr_inv.attr,
1123 &format_attr_thresh5.attr,
1127 static struct attribute *ivt_uncore_cbox_formats_attr[] = {
1128 &format_attr_event.attr,
1129 &format_attr_umask.attr,
1130 &format_attr_edge.attr,
1131 &format_attr_tid_en.attr,
1132 &format_attr_thresh8.attr,
1133 &format_attr_filter_tid.attr,
1134 &format_attr_filter_link.attr,
1135 &format_attr_filter_state2.attr,
1136 &format_attr_filter_nid2.attr,
1137 &format_attr_filter_opc2.attr,
1141 static struct attribute *ivt_uncore_pcu_formats_attr[] = {
1142 &format_attr_event_ext.attr,
1143 &format_attr_occ_sel.attr,
1144 &format_attr_edge.attr,
1145 &format_attr_thresh5.attr,
1146 &format_attr_occ_invert.attr,
1147 &format_attr_occ_edge.attr,
1148 &format_attr_filter_band0.attr,
1149 &format_attr_filter_band1.attr,
1150 &format_attr_filter_band2.attr,
1151 &format_attr_filter_band3.attr,
1155 static struct attribute *ivt_uncore_qpi_formats_attr[] = {
1156 &format_attr_event_ext.attr,
1157 &format_attr_umask.attr,
1158 &format_attr_edge.attr,
1159 &format_attr_thresh8.attr,
1160 &format_attr_match_rds.attr,
1161 &format_attr_match_rnid30.attr,
1162 &format_attr_match_rnid4.attr,
1163 &format_attr_match_dnid.attr,
1164 &format_attr_match_mc.attr,
1165 &format_attr_match_opc.attr,
1166 &format_attr_match_vnw.attr,
1167 &format_attr_match0.attr,
1168 &format_attr_match1.attr,
1169 &format_attr_mask_rds.attr,
1170 &format_attr_mask_rnid30.attr,
1171 &format_attr_mask_rnid4.attr,
1172 &format_attr_mask_dnid.attr,
1173 &format_attr_mask_mc.attr,
1174 &format_attr_mask_opc.attr,
1175 &format_attr_mask_vnw.attr,
1176 &format_attr_mask0.attr,
1177 &format_attr_mask1.attr,
1181 static struct attribute_group ivt_uncore_format_group = {
1183 .attrs = ivt_uncore_formats_attr,
1186 static struct attribute_group ivt_uncore_ubox_format_group = {
1188 .attrs = ivt_uncore_ubox_formats_attr,
1191 static struct attribute_group ivt_uncore_cbox_format_group = {
1193 .attrs = ivt_uncore_cbox_formats_attr,
1196 static struct attribute_group ivt_uncore_pcu_format_group = {
1198 .attrs = ivt_uncore_pcu_formats_attr,
1201 static struct attribute_group ivt_uncore_qpi_format_group = {
1203 .attrs = ivt_uncore_qpi_formats_attr,
1206 static struct intel_uncore_type ivt_uncore_ubox = {
1210 .perf_ctr_bits = 44,
1211 .fixed_ctr_bits = 48,
1212 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1213 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1214 .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1215 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1216 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1217 .ops = &ivt_uncore_msr_ops,
1218 .format_group = &ivt_uncore_ubox_format_group,
1221 static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1225 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1226 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1227 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1228 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1229 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1230 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1231 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1232 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1233 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1234 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1235 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1236 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1237 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1238 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1239 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1240 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1241 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1242 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1243 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1244 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1245 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1246 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1247 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1249 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1250 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1251 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1252 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1253 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1254 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1255 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1256 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1257 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1258 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
static u64 ivt_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}
1280 static struct event_constraint *
1281 ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1283 return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1286 static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1288 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1289 struct extra_reg *er;
1292 for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1293 if (er->event != (event->hw.config & er->config_mask))
1299 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1300 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1301 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1307 static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1309 struct hw_perf_event *hwc = &event->hw;
1310 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1312 if (reg1->idx != EXTRA_REG_NONE) {
1313 u64 filter = uncore_shared_reg_config(box, 0);
1314 wrmsrl(reg1->reg, filter & 0xffffffff);
1315 wrmsrl(reg1->reg + 6, filter >> 32);
1318 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1321 static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1322 .init_box = ivt_uncore_msr_init_box,
1323 .disable_box = snbep_uncore_msr_disable_box,
1324 .enable_box = snbep_uncore_msr_enable_box,
1325 .disable_event = snbep_uncore_msr_disable_event,
1326 .enable_event = ivt_cbox_enable_event,
1327 .read_counter = uncore_msr_read_counter,
1328 .hw_config = ivt_cbox_hw_config,
1329 .get_constraint = ivt_cbox_get_constraint,
1330 .put_constraint = snbep_cbox_put_constraint,
1333 static struct intel_uncore_type ivt_uncore_cbox = {
1337 .perf_ctr_bits = 44,
1338 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1339 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1340 .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1341 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1342 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1343 .num_shared_regs = 1,
1344 .constraints = snbep_uncore_cbox_constraints,
1345 .ops = &ivt_uncore_cbox_ops,
1346 .format_group = &ivt_uncore_cbox_format_group,
1349 static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1350 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1351 .hw_config = snbep_pcu_hw_config,
1352 .get_constraint = snbep_pcu_get_constraint,
1353 .put_constraint = snbep_pcu_put_constraint,
1356 static struct intel_uncore_type ivt_uncore_pcu = {
1360 .perf_ctr_bits = 48,
1361 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1362 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1363 .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1364 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1365 .num_shared_regs = 1,
1366 .ops = &ivt_uncore_pcu_ops,
1367 .format_group = &ivt_uncore_pcu_format_group,
1370 static struct intel_uncore_type *ivt_msr_uncores[] = {
1377 static struct intel_uncore_type ivt_uncore_ha = {
1381 .perf_ctr_bits = 48,
1382 IVT_UNCORE_PCI_COMMON_INIT(),
1385 static struct intel_uncore_type ivt_uncore_imc = {
1389 .perf_ctr_bits = 48,
1390 .fixed_ctr_bits = 48,
1391 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1392 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1393 IVT_UNCORE_PCI_COMMON_INIT(),
1396 /* registers in IRP boxes are not properly aligned */
1397 static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1398 static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1400 static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1402 struct pci_dev *pdev = box->pci_dev;
1403 struct hw_perf_event *hwc = &event->hw;
1405 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1406 hwc->config | SNBEP_PMON_CTL_EN);
1409 static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1411 struct pci_dev *pdev = box->pci_dev;
1412 struct hw_perf_event *hwc = &event->hw;
1414 pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1417 static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1419 struct pci_dev *pdev = box->pci_dev;
1420 struct hw_perf_event *hwc = &event->hw;
1423 pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1424 pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1429 static struct intel_uncore_ops ivt_uncore_irp_ops = {
1430 .init_box = ivt_uncore_pci_init_box,
1431 .disable_box = snbep_uncore_pci_disable_box,
1432 .enable_box = snbep_uncore_pci_enable_box,
1433 .disable_event = ivt_uncore_irp_disable_event,
1434 .enable_event = ivt_uncore_irp_enable_event,
1435 .read_counter = ivt_uncore_irp_read_counter,
1438 static struct intel_uncore_type ivt_uncore_irp = {
1442 .perf_ctr_bits = 48,
1443 .event_mask = IVT_PMON_RAW_EVENT_MASK,
1444 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1445 .ops = &ivt_uncore_irp_ops,
1446 .format_group = &ivt_uncore_format_group,
1449 static struct intel_uncore_ops ivt_uncore_qpi_ops = {
1450 .init_box = ivt_uncore_pci_init_box,
1451 .disable_box = snbep_uncore_pci_disable_box,
1452 .enable_box = snbep_uncore_pci_enable_box,
1453 .disable_event = snbep_uncore_pci_disable_event,
1454 .enable_event = snbep_qpi_enable_event,
1455 .read_counter = snbep_uncore_pci_read_counter,
1456 .hw_config = snbep_qpi_hw_config,
1457 .get_constraint = uncore_get_constraint,
1458 .put_constraint = uncore_put_constraint,
1461 static struct intel_uncore_type ivt_uncore_qpi = {
1465 .perf_ctr_bits = 48,
1466 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1467 .event_ctl = SNBEP_PCI_PMON_CTL0,
1468 .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1469 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1470 .num_shared_regs = 1,
1471 .ops = &ivt_uncore_qpi_ops,
1472 .format_group = &ivt_uncore_qpi_format_group,
1475 static struct intel_uncore_type ivt_uncore_r2pcie = {
1479 .perf_ctr_bits = 44,
1480 .constraints = snbep_uncore_r2pcie_constraints,
1481 IVT_UNCORE_PCI_COMMON_INIT(),
1484 static struct intel_uncore_type ivt_uncore_r3qpi = {
1488 .perf_ctr_bits = 44,
1489 .constraints = snbep_uncore_r3qpi_constraints,
1490 IVT_UNCORE_PCI_COMMON_INIT(),
1498 IVT_PCI_UNCORE_R2PCIE,
1499 IVT_PCI_UNCORE_R3QPI,
1502 static struct intel_uncore_type *ivt_pci_uncores[] = {
1503 [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
1504 [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
1505 [IVT_PCI_UNCORE_IRP] = &ivt_uncore_irp,
1506 [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
1507 [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1508 [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
1512 static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1513 { /* Home Agent 0 */
1514 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1515 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1517 { /* Home Agent 1 */
1518 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1519 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1521 { /* MC0 Channel 0 */
1522 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1523 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1525 { /* MC0 Channel 1 */
1526 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1527 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1529 { /* MC0 Channel 3 */
1530 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1531 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1533 { /* MC0 Channel 4 */
1534 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1535 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1537 { /* MC1 Channel 0 */
1538 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1539 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1541 { /* MC1 Channel 1 */
1542 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1543 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1545 { /* MC1 Channel 3 */
1546 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1547 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1549 { /* MC1 Channel 4 */
1550 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1551 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1554 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1555 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1558 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1559 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1562 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1563 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1566 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1567 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1570 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1571 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1573 { /* R3QPI0 Link 0 */
1574 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1575 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1577 { /* R3QPI0 Link 1 */
1578 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1579 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1581 { /* R3QPI1 Link 2 */
1582 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1583 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1585 { /* QPI Port 0 filter */
1586 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1587 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1588 SNBEP_PCI_QPI_PORT0_FILTER),
1590 { /* QPI Port 0 filter */
1591 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1592 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1593 SNBEP_PCI_QPI_PORT1_FILTER),
1595 { /* end: all zeroes */ }
1598 static struct pci_driver ivt_uncore_pci_driver = {
1599 .name = "ivt_uncore",
1600 .id_table = ivt_uncore_pci_ids,
1602 /* end of IvyTown uncore support */
1604 /* Sandy Bridge uncore support */
1605 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1607 struct hw_perf_event *hwc = &event->hw;
1609 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1610 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1612 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
1615 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1617 wrmsrl(event->hw.config_base, 0);
1620 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
1622 if (box->pmu->pmu_idx == 0) {
1623 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
1624 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
1628 static struct uncore_event_desc snb_uncore_events[] = {
1629 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1630 { /* end: all zeroes */ },
1633 static struct attribute *snb_uncore_formats_attr[] = {
1634 &format_attr_event.attr,
1635 &format_attr_umask.attr,
1636 &format_attr_edge.attr,
1637 &format_attr_inv.attr,
1638 &format_attr_cmask5.attr,
1642 static struct attribute_group snb_uncore_format_group = {
1644 .attrs = snb_uncore_formats_attr,
1647 static struct intel_uncore_ops snb_uncore_msr_ops = {
1648 .init_box = snb_uncore_msr_init_box,
1649 .disable_event = snb_uncore_msr_disable_event,
1650 .enable_event = snb_uncore_msr_enable_event,
1651 .read_counter = uncore_msr_read_counter,
1654 static struct event_constraint snb_uncore_cbox_constraints[] = {
1655 UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1656 UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1657 EVENT_CONSTRAINT_END
1660 static struct intel_uncore_type snb_uncore_cbox = {
1664 .perf_ctr_bits = 44,
1665 .fixed_ctr_bits = 48,
1666 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
1667 .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
1668 .fixed_ctr = SNB_UNC_FIXED_CTR,
1669 .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
1671 .event_mask = SNB_UNC_RAW_EVENT_MASK,
1672 .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
1673 .constraints = snb_uncore_cbox_constraints,
1674 .ops = &snb_uncore_msr_ops,
1675 .format_group = &snb_uncore_format_group,
1676 .event_descs = snb_uncore_events,
1679 static struct intel_uncore_type *snb_msr_uncores[] = {
1688 static struct uncore_event_desc snb_uncore_imc_events[] = {
1689 INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
1690 INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
1691 INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
1693 INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
1694 INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
1695 INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
1697 { /* end: all zeroes */ },
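/*
 * The 6.103515625e-5 scale is 1/16384: the IMC counters presumably count
 * 64-byte cache lines, and 64 / 2^20 converts lines to the MiB advertised
 * by the "unit" attribute.
 */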
1700 #define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff
1701 #define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48
1703 /* page size multiple covering all config regs */
1704 #define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000
1706 #define SNB_UNCORE_PCI_IMC_DATA_READS 0x1
1707 #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050
1708 #define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2
1709 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
1710 #define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE
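/*
 * The client IMC exposes free-running counters through MMIO rather than PCI
 * config space: the BAR is read from config offset 0x48 and the read/write
 * counters sit at fixed offsets 0x5050/0x5054 within that mapping.
 */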
1712 static struct attribute *snb_uncore_imc_formats_attr[] = {
1713 &format_attr_event.attr,
1717 static struct attribute_group snb_uncore_imc_format_group = {
1719 .attrs = snb_uncore_imc_formats_attr,
1722 static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
1724 struct pci_dev *pdev = box->pci_dev;
1725 int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
1726 resource_size_t addr;
1729 pci_read_config_dword(pdev, where, &pci_dword);
1732 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1733 pci_read_config_dword(pdev, where + 4, &pci_dword);
1734 addr |= ((resource_size_t)pci_dword << 32);
1737 addr &= ~(PAGE_SIZE - 1);
1739 box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
1740 box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
1743 static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
1746 static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
1749 static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1752 static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1755 static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1757 struct hw_perf_event *hwc = &event->hw;
1759 return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
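/*
 * These MMIO counters are only 32 bits wide (fixed_ctr_bits = 32), so the
 * box hrtimer armed from snb_uncore_imc_event_start() periodically folds
 * the deltas into the 64-bit perf count before the hardware can wrap.
 */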
/*
 * Custom event_init() function: we define our own fixed, free-running
 * counters, so we do not want to conflict with the generic uncore logic.
 * This also simplifies processing.
 */
1767 static int snb_uncore_imc_event_init(struct perf_event *event)
1769 struct intel_uncore_pmu *pmu;
1770 struct intel_uncore_box *box;
1771 struct hw_perf_event *hwc = &event->hw;
1772 u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
1775 if (event->attr.type != event->pmu->type)
1778 pmu = uncore_event_to_pmu(event);
1779 /* no device found for this pmu */
1780 if (pmu->func_id < 0)
1783 /* Sampling not supported yet */
1784 if (hwc->sample_period)
1787 /* unsupported modes and filters */
1788 if (event->attr.exclude_user ||
1789 event->attr.exclude_kernel ||
1790 event->attr.exclude_hv ||
1791 event->attr.exclude_idle ||
1792 event->attr.exclude_host ||
1793 event->attr.exclude_guest ||
1794 event->attr.sample_period) /* no sampling */
1798 * Place all uncore events for a particular physical package
1804 /* check only supported bits are set */
1805 if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
1808 box = uncore_pmu_to_box(pmu, event->cpu);
1809 if (!box || box->cpu < 0)
1812 event->cpu = box->cpu;
1815 event->hw.last_tag = ~0ULL;
1816 event->hw.extra_reg.idx = EXTRA_REG_NONE;
1817 event->hw.branch_reg.idx = EXTRA_REG_NONE;
1819 * check event is known (whitelist, determines counter)
1822 case SNB_UNCORE_PCI_IMC_DATA_READS:
1823 base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
1824 idx = UNCORE_PMC_IDX_FIXED;
1826 case SNB_UNCORE_PCI_IMC_DATA_WRITES:
1827 base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
1828 idx = UNCORE_PMC_IDX_FIXED + 1;
1834 /* must be done before validate_group */
1835 event->hw.event_base = base;
1836 event->hw.config = cfg;
1837 event->hw.idx = idx;
1839 /* no group validation needed, we have free running counters */
1844 static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1849 static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
1851 struct intel_uncore_box *box = uncore_event_to_box(event);
1854 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1857 event->hw.state = 0;
1860 list_add_tail(&event->active_entry, &box->active_list);
1862 count = snb_uncore_imc_read_counter(box, event);
1863 local64_set(&event->hw.prev_count, count);
1865 if (box->n_active == 1)
1866 uncore_pmu_start_hrtimer(box);
1869 static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
1871 struct intel_uncore_box *box = uncore_event_to_box(event);
1872 struct hw_perf_event *hwc = &event->hw;
1874 if (!(hwc->state & PERF_HES_STOPPED)) {
1877 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1878 hwc->state |= PERF_HES_STOPPED;
1880 list_del(&event->active_entry);
1882 if (box->n_active == 0)
1883 uncore_pmu_cancel_hrtimer(box);
1886 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1888 * Drain the remaining delta count out of a event
1889 * that we are disabling:
1891 uncore_perf_event_update(box, event);
1892 hwc->state |= PERF_HES_UPTODATE;
1896 static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
1898 struct intel_uncore_box *box = uncore_event_to_box(event);
1899 struct hw_perf_event *hwc = &event->hw;
1904 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1905 if (!(flags & PERF_EF_START))
1906 hwc->state |= PERF_HES_ARCH;
1908 snb_uncore_imc_event_start(event, 0);
1915 static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
1917 struct intel_uncore_box *box = uncore_event_to_box(event);
1920 snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
1922 for (i = 0; i < box->n_events; i++) {
1923 if (event == box->event_list[i]) {
static int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	int bus;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;

	/* client parts are single package, so everything maps to id 0 */
	pcibus_to_physid[bus] = 0;

	pci_dev_put(dev);

	return 0;
}
1948 static struct pmu snb_uncore_imc_pmu = {
1949 .task_ctx_nr = perf_invalid_context,
1950 .event_init = snb_uncore_imc_event_init,
1951 .add = snb_uncore_imc_event_add,
1952 .del = snb_uncore_imc_event_del,
1953 .start = snb_uncore_imc_event_start,
1954 .stop = snb_uncore_imc_event_stop,
1955 .read = uncore_pmu_event_read,
1958 static struct intel_uncore_ops snb_uncore_imc_ops = {
1959 .init_box = snb_uncore_imc_init_box,
1960 .enable_box = snb_uncore_imc_enable_box,
1961 .disable_box = snb_uncore_imc_disable_box,
1962 .disable_event = snb_uncore_imc_disable_event,
1963 .enable_event = snb_uncore_imc_enable_event,
1964 .hw_config = snb_uncore_imc_hw_config,
1965 .read_counter = snb_uncore_imc_read_counter,
1968 static struct intel_uncore_type snb_uncore_imc = {
1972 .fixed_ctr_bits = 32,
1973 .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE,
1974 .event_descs = snb_uncore_imc_events,
1975 .format_group = &snb_uncore_imc_format_group,
1976 .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
1977 .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK,
1978 .ops = &snb_uncore_imc_ops,
1979 .pmu = &snb_uncore_imc_pmu,
1982 static struct intel_uncore_type *snb_pci_uncores[] = {
1983 [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc,
1987 static DEFINE_PCI_DEVICE_TABLE(snb_uncore_pci_ids) = {
1989 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
1990 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
1994 static DEFINE_PCI_DEVICE_TABLE(ivb_uncore_pci_ids) = {
1996 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
1997 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
2001 static DEFINE_PCI_DEVICE_TABLE(hsw_uncore_pci_ids) = {
2003 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
2004 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
2008 static struct pci_driver snb_uncore_pci_driver = {
2009 .name = "snb_uncore",
2010 .id_table = snb_uncore_pci_ids,
2013 static struct pci_driver ivb_uncore_pci_driver = {
2014 .name = "ivb_uncore",
2015 .id_table = ivb_uncore_pci_ids,
2018 static struct pci_driver hsw_uncore_pci_driver = {
2019 .name = "hsw_uncore",
2020 .id_table = hsw_uncore_pci_ids,
2023 /* end of Sandy Bridge uncore support */
2025 /* Nehalem uncore support */
2026 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
2028 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
2031 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
2033 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
2036 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2038 struct hw_perf_event *hwc = &event->hw;
2040 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
2041 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
2043 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
2046 static struct attribute *nhm_uncore_formats_attr[] = {
2047 &format_attr_event.attr,
2048 &format_attr_umask.attr,
2049 &format_attr_edge.attr,
2050 &format_attr_inv.attr,
2051 &format_attr_cmask8.attr,
2055 static struct attribute_group nhm_uncore_format_group = {
2057 .attrs = nhm_uncore_formats_attr,
2060 static struct uncore_event_desc nhm_uncore_events[] = {
2061 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
2062 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
2063 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
2064 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
2065 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
2066 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
2067 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
2068 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
2069 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
2070 { /* end: all zeroes */ },
2073 static struct intel_uncore_ops nhm_uncore_msr_ops = {
2074 .disable_box = nhm_uncore_msr_disable_box,
2075 .enable_box = nhm_uncore_msr_enable_box,
2076 .disable_event = snb_uncore_msr_disable_event,
2077 .enable_event = nhm_uncore_msr_enable_event,
2078 .read_counter = uncore_msr_read_counter,
2081 static struct intel_uncore_type nhm_uncore = {
2085 .perf_ctr_bits = 48,
2086 .fixed_ctr_bits = 48,
2087 .event_ctl = NHM_UNC_PERFEVTSEL0,
2088 .perf_ctr = NHM_UNC_UNCORE_PMC0,
2089 .fixed_ctr = NHM_UNC_FIXED_CTR,
2090 .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
2091 .event_mask = NHM_UNC_RAW_EVENT_MASK,
2092 .event_descs = nhm_uncore_events,
2093 .ops = &nhm_uncore_msr_ops,
2094 .format_group = &nhm_uncore_format_group,
2097 static struct intel_uncore_type *nhm_msr_uncores[] = {
2101 /* end of Nehalem uncore support */
2103 /* Nehalem-EX uncore support */
2104 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
2105 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
2106 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
2107 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
2109 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
2111 wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
2114 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
2116 unsigned msr = uncore_msr_box_ctl(box);
2120 rdmsrl(msr, config);
2121 config &= ~((1ULL << uncore_num_counters(box)) - 1);
2122 /* WBox has a fixed counter */
2123 if (uncore_msr_fixed_ctl(box))
2124 config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
2125 wrmsrl(msr, config);
2129 static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
2131 unsigned msr = uncore_msr_box_ctl(box);
2135 rdmsrl(msr, config);
2136 config |= (1ULL << uncore_num_counters(box)) - 1;
2137 /* WBox has a fixed counter */
2138 if (uncore_msr_fixed_ctl(box))
2139 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
2140 wrmsrl(msr, config);
2144 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
2146 wrmsrl(event->hw.config_base, 0);
2149 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2151 struct hw_perf_event *hwc = &event->hw;
2153 if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
2154 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
2155 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
2156 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2158 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2161 #define NHMEX_UNCORE_OPS_COMMON_INIT() \
2162 .init_box = nhmex_uncore_msr_init_box, \
2163 .disable_box = nhmex_uncore_msr_disable_box, \
2164 .enable_box = nhmex_uncore_msr_enable_box, \
2165 .disable_event = nhmex_uncore_msr_disable_event, \
2166 .read_counter = uncore_msr_read_counter
2168 static struct intel_uncore_ops nhmex_uncore_ops = {
2169 NHMEX_UNCORE_OPS_COMMON_INIT(),
2170 .enable_event = nhmex_uncore_msr_enable_event,
2173 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
2174 &format_attr_event.attr,
2175 &format_attr_edge.attr,
2179 static struct attribute_group nhmex_uncore_ubox_format_group = {
2181 .attrs = nhmex_uncore_ubox_formats_attr,
2184 static struct intel_uncore_type nhmex_uncore_ubox = {
2188 .perf_ctr_bits = 48,
2189 .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
2190 .perf_ctr = NHMEX_U_MSR_PMON_CTR,
2191 .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
2192 .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
2193 .ops = &nhmex_uncore_ops,
2194 .format_group = &nhmex_uncore_ubox_format_group
2197 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
2198 &format_attr_event.attr,
2199 &format_attr_umask.attr,
2200 &format_attr_edge.attr,
2201 &format_attr_inv.attr,
2202 &format_attr_thresh8.attr,
2206 static struct attribute_group nhmex_uncore_cbox_format_group = {
2208 .attrs = nhmex_uncore_cbox_formats_attr,
2211 /* msr offset for each instance of cbox */
2212 static unsigned nhmex_cbox_msr_offsets[] = {
2213 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
2216 static struct intel_uncore_type nhmex_uncore_cbox = {
2220 .perf_ctr_bits = 48,
2221 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
2222 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
2223 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
2224 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
2225 .msr_offsets = nhmex_cbox_msr_offsets,
2227 .ops = &nhmex_uncore_ops,
2228 .format_group = &nhmex_uncore_cbox_format_group
2231 static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
2232 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
2233 { /* end: all zeroes */ },
2236 static struct intel_uncore_type nhmex_uncore_wbox = {
2240 .perf_ctr_bits = 48,
2241 .event_ctl = NHMEX_W_MSR_PMON_CNT0,
2242 .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
2243 .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
2244 .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
2245 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
2246 .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
2248 .event_descs = nhmex_uncore_wbox_events,
2249 .ops = &nhmex_uncore_ops,
2250 .format_group = &nhmex_uncore_cbox_format_group
2253 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2255 struct hw_perf_event *hwc = &event->hw;
2256 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2257 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2260 ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
2261 NHMEX_B_PMON_CTR_SHIFT;
2262 ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
2263 NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
2265 /* events that do not use the match/mask registers */
2266 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
2267 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
2270 if (box->pmu->pmu_idx == 0)
2271 reg1->reg = NHMEX_B0_MSR_MATCH;
2273 reg1->reg = NHMEX_B1_MSR_MATCH;
2275 reg1->config = event->attr.config1;
2276 reg2->config = event->attr.config2;
2280 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2282 struct hw_perf_event *hwc = &event->hw;
2283 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2284 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2286 if (reg1->idx != EXTRA_REG_NONE) {
2287 wrmsrl(reg1->reg, reg1->config);
2288 wrmsrl(reg1->reg + 1, reg2->config);
2290 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2291 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
2295 * The Bbox has 4 counters, but each counter monitors different events.
2296 * Use bits 6-7 in the event config to select the counter.
2298 static struct event_constraint nhmex_uncore_bbox_constraints[] = {
2299 EVENT_CONSTRAINT(0 , 1, 0xc0),
2300 EVENT_CONSTRAINT(0x40, 2, 0xc0),
2301 EVENT_CONSTRAINT(0x80, 4, 0xc0),
2302 EVENT_CONSTRAINT(0xc0, 8, 0xc0),
2303 EVENT_CONSTRAINT_END,
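/*
 * (Illustrative note, not part of the original source: with the "counter"
 * field at config bits 6-7, an event programmed with counter=2 has
 * (config & 0xc0) == 0x80, so it matches the third constraint above and is
 * restricted to counter index 2.)
 */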
2306 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
2307 &format_attr_event5.attr,
2308 &format_attr_counter.attr,
2309 &format_attr_match.attr,
2310 &format_attr_mask.attr,
2314 static struct attribute_group nhmex_uncore_bbox_format_group = {
2316 .attrs = nhmex_uncore_bbox_formats_attr,
2319 static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
2320 NHMEX_UNCORE_OPS_COMMON_INIT(),
2321 .enable_event = nhmex_bbox_msr_enable_event,
2322 .hw_config = nhmex_bbox_hw_config,
2323 .get_constraint = uncore_get_constraint,
2324 .put_constraint = uncore_put_constraint,
2327 static struct intel_uncore_type nhmex_uncore_bbox = {
2331 .perf_ctr_bits = 48,
2332 .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
2333 .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
2334 .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
2335 .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
2336 .msr_offset = NHMEX_B_MSR_OFFSET,
2338 .num_shared_regs = 1,
2339 .constraints = nhmex_uncore_bbox_constraints,
2340 .ops = &nhmex_uncore_bbox_ops,
2341 .format_group = &nhmex_uncore_bbox_format_group
2344 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2346 struct hw_perf_event *hwc = &event->hw;
2347 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2348 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2350 /* only the TO_R_PROG_EV event uses the match/mask register */
2351 if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
2352 NHMEX_S_EVENT_TO_R_PROG_EV)
2355 if (box->pmu->pmu_idx == 0)
2356 reg1->reg = NHMEX_S0_MSR_MM_CFG;
2358 reg1->reg = NHMEX_S1_MSR_MM_CFG;
2360 reg1->config = event->attr.config1;
2361 reg2->config = event->attr.config2;
2365 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2367 struct hw_perf_event *hwc = &event->hw;
2368 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2369 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2371 if (reg1->idx != EXTRA_REG_NONE) {
2372 wrmsrl(reg1->reg, 0);
2373 wrmsrl(reg1->reg + 1, reg1->config);
2374 wrmsrl(reg1->reg + 2, reg2->config);
2375 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
2377 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
2380 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
2381 &format_attr_event.attr,
2382 &format_attr_umask.attr,
2383 &format_attr_edge.attr,
2384 &format_attr_inv.attr,
2385 &format_attr_thresh8.attr,
2386 &format_attr_match.attr,
2387 &format_attr_mask.attr,
2391 static struct attribute_group nhmex_uncore_sbox_format_group = {
2393 .attrs = nhmex_uncore_sbox_formats_attr,
2396 static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
2397 NHMEX_UNCORE_OPS_COMMON_INIT(),
2398 .enable_event = nhmex_sbox_msr_enable_event,
2399 .hw_config = nhmex_sbox_hw_config,
2400 .get_constraint = uncore_get_constraint,
2401 .put_constraint = uncore_put_constraint,
2404 static struct intel_uncore_type nhmex_uncore_sbox = {
2408 .perf_ctr_bits = 48,
2409 .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
2410 .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
2411 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
2412 .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
2413 .msr_offset = NHMEX_S_MSR_OFFSET,
2415 .num_shared_regs = 1,
2416 .ops = &nhmex_uncore_sbox_ops,
2417 .format_group = &nhmex_uncore_sbox_format_group
2421 EXTRA_REG_NHMEX_M_FILTER,
2422 EXTRA_REG_NHMEX_M_DSP,
2423 EXTRA_REG_NHMEX_M_ISS,
2424 EXTRA_REG_NHMEX_M_MAP,
2425 EXTRA_REG_NHMEX_M_MSC_THR,
2426 EXTRA_REG_NHMEX_M_PGT,
2427 EXTRA_REG_NHMEX_M_PLD,
2428 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
2431 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
2432 MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
2433 MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
2434 MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
2435 MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
2436 /* event 0xa uses two extra registers */
2437 MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
2438 MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
2439 MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
2440 /* events 0xd ~ 0x10 use the same extra register */
2441 MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
2442 MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
2443 MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
2444 MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
2445 MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
2446 MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
2447 MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
2448 MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
2449 MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
2453 /* Nehalem-EX or Westmere-EX? */
2454 static bool uncore_nhmex;
2456 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
2458 struct intel_uncore_extra_reg *er;
2459 unsigned long flags;
2463 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2464 er = &box->shared_regs[idx];
2465 raw_spin_lock_irqsave(&er->lock, flags);
2466 if (!atomic_read(&er->ref) || er->config == config) {
2467 atomic_inc(&er->ref);
2468 er->config = config;
2471 raw_spin_unlock_irqrestore(&er->lock, flags);
2476 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
2477 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
2478 * fields which are shared.
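*
* (Illustrative note, not part of the original source: the per-field
* reference counts are packed into er->ref eight bits apiece, so field idx
* is inspected with __BITS_VALUE(atomic_read(&er->ref), idx, 8) and taken
* by adding 1 << (idx * 8), exactly as the code below does.)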
2480 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2481 if (WARN_ON_ONCE(idx >= 4))
2484 /* mask of the shared fields */
2486 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
2488 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
2489 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2491 raw_spin_lock_irqsave(&er->lock, flags);
2492 /* add mask of the non-shared field if it's in use */
2493 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
2495 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2497 mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2500 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
2501 atomic_add(1 << (idx * 8), &er->ref);
2503 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2504 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2506 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2507 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2508 er->config &= ~mask;
2509 er->config |= (config & mask);
2512 raw_spin_unlock_irqrestore(&er->lock, flags);
2517 static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2519 struct intel_uncore_extra_reg *er;
2521 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2522 er = &box->shared_regs[idx];
2523 atomic_dec(&er->ref);
2527 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2528 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2529 atomic_sub(1 << (idx * 8), &er->ref);
2532 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
2534 struct hw_perf_event *hwc = &event->hw;
2535 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2536 u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
2537 u64 config = reg1->config;
2539 /* get the non-shared control bits and shift them */
2540 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2542 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2544 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2545 if (new_idx > orig_idx) {
2546 idx = new_idx - orig_idx;
2549 idx = orig_idx - new_idx;
2553 /* add the shared control bits back */
2555 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2557 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2558 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2560 /* adjust the main event selector */
2561 if (new_idx > orig_idx)
2562 hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2564 hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2565 reg1->config = config;
2566 reg1->idx = ~0xff | new_idx;
2571 static struct event_constraint *
2572 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2574 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2575 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2576 int i, idx[2], alloc = 0;
2577 u64 config1 = reg1->config;
2579 idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2580 idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2582 for (i = 0; i < 2; i++) {
2583 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2589 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2590 __BITS_VALUE(config1, i, 32)))
2592 alloc |= (0x1 << i);
2595 /* for the match/mask registers */
2596 if (reg2->idx != EXTRA_REG_NONE &&
2597 (uncore_box_is_fake(box) || !reg2->alloc) &&
2598 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2602 * If it's a fake box -- as per validate_{group,event}() we
2603 * shouldn't touch event state and we can avoid doing so
2604 * since both will only call get_event_constraints() once
2605 * on each event, this avoids the need for reg->alloc.
2607 if (!uncore_box_is_fake(box)) {
2608 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2609 nhmex_mbox_alter_er(event, idx[0], true);
2610 reg1->alloc |= alloc;
2611 if (reg2->idx != EXTRA_REG_NONE)
2616 if (idx[0] != 0xff && !(alloc & 0x1) &&
2617 idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2619 * events 0xd ~ 0x10 are functionally identical, but are
2620 * controlled by different fields in the ZDP_CTL_FVC
2621 * register. If we failed to take one field, try the alternative.
2624 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2625 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2626 idx[0] = (idx[0] + 1) % 4;
2627 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2628 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2629 config1 = nhmex_mbox_alter_er(event, idx[0], false);
2635 nhmex_mbox_put_shared_reg(box, idx[0]);
2637 nhmex_mbox_put_shared_reg(box, idx[1]);
2638 return &constraint_empty;
2641 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2643 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2644 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2646 if (uncore_box_is_fake(box))
2649 if (reg1->alloc & 0x1)
2650 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2651 if (reg1->alloc & 0x2)
2652 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2656 nhmex_mbox_put_shared_reg(box, reg2->idx);
2661 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
2663 if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2665 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
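/*
 * (Worked example, for illustration only: a ZDP_CTL_FVC extra reg whose
 * inc_sel is 0xe maps to EXTRA_REG_NHMEX_M_ZDP_CTL_FVC + (0xe - 0xd), i.e.
 * the second of the four pseudo-indices reserved for events 0xd ~ 0x10.)
 */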
2668 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2670 struct intel_uncore_type *type = box->pmu->type;
2671 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2672 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2673 struct extra_reg *er;
2677 * The mbox events may require 2 extra MSRs at the most. But only
2678 * the lower 32 bits in these MSRs are significant, so we can use
2679 * config1 to pass two MSRs' config.
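*
* (Illustrative note, not part of the original source: the two candidate
* MSR addresses are packed into reg1->reg 16 bits apart and the two
* extra-register indices into reg1->idx 8 bits apart, so slot i is later
* recovered with __BITS_VALUE(reg1->reg, i, 16) and
* __BITS_VALUE(reg1->idx, i, 8) when the event is programmed.)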
2681 for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2682 if (er->event != (event->hw.config & er->config_mask))
2684 if (event->attr.config1 & ~er->valid_mask)
2687 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2688 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2691 /* always use the 32~63 bits to pass the PLD config */
2692 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2694 else if (WARN_ON_ONCE(reg_idx > 0))
2697 reg1->idx &= ~(0xff << (reg_idx * 8));
2698 reg1->reg &= ~(0xffff << (reg_idx * 16));
2699 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2700 reg1->reg |= msr << (reg_idx * 16);
2701 reg1->config = event->attr.config1;
2705 * The mbox only provides the ability to perform address matching
2706 * for the PLD events.
2709 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2710 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2711 reg2->config = event->attr.config2;
2713 reg2->config = ~0ULL;
2714 if (box->pmu->pmu_idx == 0)
2715 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2717 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2722 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
2724 struct intel_uncore_extra_reg *er;
2725 unsigned long flags;
2728 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2729 return box->shared_regs[idx].config;
2731 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2732 raw_spin_lock_irqsave(&er->lock, flags);
2733 config = er->config;
2734 raw_spin_unlock_irqrestore(&er->lock, flags);
2738 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2740 struct hw_perf_event *hwc = &event->hw;
2741 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2742 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2745 idx = __BITS_VALUE(reg1->idx, 0, 8);
2747 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2748 nhmex_mbox_shared_reg_config(box, idx));
2749 idx = __BITS_VALUE(reg1->idx, 1, 8);
2751 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2752 nhmex_mbox_shared_reg_config(box, idx));
2754 if (reg2->idx != EXTRA_REG_NONE) {
2755 wrmsrl(reg2->reg, 0);
2756 if (reg2->config != ~0ULL) {
2757 wrmsrl(reg2->reg + 1,
2758 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2759 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2760 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2761 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2765 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2768 DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
2769 DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
2770 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
2771 DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
2772 DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
2773 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
2774 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
2775 DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
2776 DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
2777 DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
2778 DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
2779 DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
2780 DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
2781 DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
2782 DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
2783 DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
2785 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2786 &format_attr_count_mode.attr,
2787 &format_attr_storage_mode.attr,
2788 &format_attr_wrap_mode.attr,
2789 &format_attr_flag_mode.attr,
2790 &format_attr_inc_sel.attr,
2791 &format_attr_set_flag_sel.attr,
2792 &format_attr_filter_cfg_en.attr,
2793 &format_attr_filter_match.attr,
2794 &format_attr_filter_mask.attr,
2795 &format_attr_dsp.attr,
2796 &format_attr_thr.attr,
2797 &format_attr_fvc.attr,
2798 &format_attr_pgt.attr,
2799 &format_attr_map.attr,
2800 &format_attr_iss.attr,
2801 &format_attr_pld.attr,
2805 static struct attribute_group nhmex_uncore_mbox_format_group = {
2807 .attrs = nhmex_uncore_mbox_formats_attr,
2810 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2811 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2812 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2813 { /* end: all zeroes */ },
2816 static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2817 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2818 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2819 { /* end: all zeroes */ },
2822 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2823 NHMEX_UNCORE_OPS_COMMON_INIT(),
2824 .enable_event = nhmex_mbox_msr_enable_event,
2825 .hw_config = nhmex_mbox_hw_config,
2826 .get_constraint = nhmex_mbox_get_constraint,
2827 .put_constraint = nhmex_mbox_put_constraint,
2830 static struct intel_uncore_type nhmex_uncore_mbox = {
2834 .perf_ctr_bits = 48,
2835 .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
2836 .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
2837 .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
2838 .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
2839 .msr_offset = NHMEX_M_MSR_OFFSET,
2841 .num_shared_regs = 8,
2842 .event_descs = nhmex_uncore_mbox_events,
2843 .ops = &nhmex_uncore_mbox_ops,
2844 .format_group = &nhmex_uncore_mbox_format_group,
2847 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
2849 struct hw_perf_event *hwc = &event->hw;
2850 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2852 /* adjust the main event selector and extra register index */
2853 if (reg1->idx % 2) {
2855 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2858 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2861 /* adjust extra register config */
2862 switch (reg1->idx % 6) {
2864 /* shift the 8~15 bits to the 0~7 bits */
2868 /* shift the 0~7 bits to the 8~15 bits */
2875 * Each rbox has 4 event sets which monitor PQI ports 0~3 or 4~7.
2876 * An event set consists of 6 events; the 3rd and 4th events in
2877 * an event set use the same extra register. So an event set uses
2878 * 5 extra registers.
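*
* (Worked example, added for illustration: reg1->idx = 9 gives
* idx = 9 % 6 = 3; slots 2 and 3 share the QLX register, so
* er_idx = 2 + (9 / 6) * 5 = 7, the third shared register of the second
* event set.)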
2880 static struct event_constraint *
2881 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2883 struct hw_perf_event *hwc = &event->hw;
2884 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2885 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2886 struct intel_uncore_extra_reg *er;
2887 unsigned long flags;
2892 if (!uncore_box_is_fake(box) && reg1->alloc)
2895 idx = reg1->idx % 6;
2896 config1 = reg1->config;
2899 /* the 3rd and 4th events use the same extra register */
2902 er_idx += (reg1->idx / 6) * 5;
2904 er = &box->shared_regs[er_idx];
2905 raw_spin_lock_irqsave(&er->lock, flags);
2907 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2908 atomic_inc(&er->ref);
2909 er->config = reg1->config;
2912 } else if (idx == 2 || idx == 3) {
2914 * these two events use different fields in an extra register,
2915 * the 0~7 bits and the 8~15 bits respectively.
2917 u64 mask = 0xff << ((idx - 2) * 8);
2918 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2919 !((er->config ^ config1) & mask)) {
2920 atomic_add(1 << ((idx - 2) * 8), &er->ref);
2921 er->config &= ~mask;
2922 er->config |= config1 & mask;
2926 if (!atomic_read(&er->ref) ||
2927 (er->config == (hwc->config >> 32) &&
2928 er->config1 == reg1->config &&
2929 er->config2 == reg2->config)) {
2930 atomic_inc(&er->ref);
2931 er->config = (hwc->config >> 32);
2932 er->config1 = reg1->config;
2933 er->config2 = reg2->config;
2937 raw_spin_unlock_irqrestore(&er->lock, flags);
2941 * The Rbox events are always in pairs. The paired
2942 * events are functionally identical, but use different
2943 * extra registers. If we failed to take an extra
2944 * register, try the alternative.
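*
* (Illustrative note, not part of the original source: the pairing is by
* the low bit of the slot index -- 0 <-> 1, 2 <-> 3, 4 <-> 5 -- and
* nhmex_rbox_alter_er() above bumps the event selector by one when an
* event is moved to its partner slot.)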
2950 if (idx != reg1->idx % 6) {
2958 if (!uncore_box_is_fake(box)) {
2959 if (idx != reg1->idx % 6)
2960 nhmex_rbox_alter_er(box, event);
2965 return &constraint_empty;
2968 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2970 struct intel_uncore_extra_reg *er;
2971 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2974 if (uncore_box_is_fake(box) || !reg1->alloc)
2977 idx = reg1->idx % 6;
2981 er_idx += (reg1->idx / 6) * 5;
2983 er = &box->shared_regs[er_idx];
2984 if (idx == 2 || idx == 3)
2985 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2987 atomic_dec(&er->ref);
2992 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2994 struct hw_perf_event *hwc = &event->hw;
2995 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2996 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2999 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
3000 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
3005 reg1->config = event->attr.config1;
3010 hwc->config |= event->attr.config & (~0ULL << 32);
3011 reg2->config = event->attr.config2;
3017 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
3019 struct hw_perf_event *hwc = &event->hw;
3020 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
3021 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
3025 port = idx / 6 + box->pmu->pmu_idx * 4;
3029 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
3032 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
3036 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
3037 uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
3040 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
3042 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
3043 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
3046 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
3048 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
3049 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
3053 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
3054 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
3057 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
3058 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
3059 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
3060 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
3061 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
3063 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
3064 &format_attr_event5.attr,
3065 &format_attr_xbr_mm_cfg.attr,
3066 &format_attr_xbr_match.attr,
3067 &format_attr_xbr_mask.attr,
3068 &format_attr_qlx_cfg.attr,
3069 &format_attr_iperf_cfg.attr,
3073 static struct attribute_group nhmex_uncore_rbox_format_group = {
3075 .attrs = nhmex_uncore_rbox_formats_attr,
3078 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
3079 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
3080 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
3081 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
3082 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
3083 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
3084 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
3085 { /* end: all zeroes */ },
3088 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
3089 NHMEX_UNCORE_OPS_COMMON_INIT(),
3090 .enable_event = nhmex_rbox_msr_enable_event,
3091 .hw_config = nhmex_rbox_hw_config,
3092 .get_constraint = nhmex_rbox_get_constraint,
3093 .put_constraint = nhmex_rbox_put_constraint,
3096 static struct intel_uncore_type nhmex_uncore_rbox = {
3100 .perf_ctr_bits = 48,
3101 .event_ctl = NHMEX_R_MSR_PMON_CTL0,
3102 .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
3103 .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
3104 .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
3105 .msr_offset = NHMEX_R_MSR_OFFSET,
3107 .num_shared_regs = 20,
3108 .event_descs = nhmex_uncore_rbox_events,
3109 .ops = &nhmex_uncore_rbox_ops,
3110 .format_group = &nhmex_uncore_rbox_format_group
3113 static struct intel_uncore_type *nhmex_msr_uncores[] = {
3123 /* end of Nehalem-EX uncore support */
3125 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
3127 struct hw_perf_event *hwc = &event->hw;
3130 hwc->last_tag = ++box->tags[idx];
3132 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
3133 hwc->event_base = uncore_fixed_ctr(box);
3134 hwc->config_base = uncore_fixed_ctl(box);
3138 hwc->config_base = uncore_event_ctl(box, hwc->idx);
3139 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
3142 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
3144 u64 prev_count, new_count, delta;
3147 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
3148 shift = 64 - uncore_fixed_ctr_bits(box);
3150 shift = 64 - uncore_perf_ctr_bits(box);
3152 /* the hrtimer might modify the previous event value */
3154 prev_count = local64_read(&event->hw.prev_count);
3155 new_count = uncore_read_counter(box, event);
3156 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
3159 delta = (new_count << shift) - (prev_count << shift);
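/*
 * (Clarifying note, not in the original: shifting both samples up by
 * 64 minus the counter width puts the counter's top bit at bit 63, so the
 * unsigned subtraction stays correct even after the hardware counter
 * wraps, before the delta is folded into event->count.)
 */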
3162 local64_add(delta, &event->count);
3166 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
3167 * for SandyBridge. So we use an hrtimer to periodically poll the counter
3168 * to avoid overflow.
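*
* (Rough arithmetic, added for illustration: a 48-bit counter ticking at
* ~3 GHz wraps only after roughly 2^48 / 3e9 seconds, about a day, but the
* 32-bit SNB IMC counters wrap within a couple of seconds, so the poll
* period has to stay comfortably below the fastest wrap time.)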
3170 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
3172 struct intel_uncore_box *box;
3173 struct perf_event *event;
3174 unsigned long flags;
3177 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
3178 if (!box->n_active || box->cpu != smp_processor_id())
3179 return HRTIMER_NORESTART;
3181 * disable local interrupt to prevent uncore_pmu_event_start/stop
3182 * to interrupt the update process
3184 local_irq_save(flags);
3187 * handle boxes with an active event list as opposed to active counters.
3190 list_for_each_entry(event, &box->active_list, active_entry) {
3191 uncore_perf_event_update(box, event);
3194 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
3195 uncore_perf_event_update(box, box->events[bit]);
3197 local_irq_restore(flags);
3199 hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
3200 return HRTIMER_RESTART;
3203 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
3205 __hrtimer_start_range_ns(&box->hrtimer,
3206 ns_to_ktime(box->hrtimer_duration), 0,
3207 HRTIMER_MODE_REL_PINNED, 0);
3210 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
3212 hrtimer_cancel(&box->hrtimer);
3215 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
3217 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3218 box->hrtimer.function = uncore_pmu_hrtimer;
3221 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
3223 struct intel_uncore_box *box;
3226 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
3228 box = kzalloc_node(size, GFP_KERNEL, node);
3232 for (i = 0; i < type->num_shared_regs; i++)
3233 raw_spin_lock_init(&box->shared_regs[i].lock);
3235 uncore_pmu_init_hrtimer(box);
3236 atomic_set(&box->refcnt, 1);
3240 /* set default hrtimer timeout */
3241 box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
3243 INIT_LIST_HEAD(&box->active_list);
3249 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
3251 struct perf_event *event;
3254 max_count = box->pmu->type->num_counters;
3255 if (box->pmu->type->fixed_ctl)
3258 if (box->n_events >= max_count)
3262 box->event_list[n] = leader;
3267 list_for_each_entry(event, &leader->sibling_list, group_entry) {
3268 if (event->state <= PERF_EVENT_STATE_OFF)
3274 box->event_list[n] = event;
3280 static struct event_constraint *
3281 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
3283 struct intel_uncore_type *type = box->pmu->type;
3284 struct event_constraint *c;
3286 if (type->ops->get_constraint) {
3287 c = type->ops->get_constraint(box, event);
3292 if (event->attr.config == UNCORE_FIXED_EVENT)
3293 return &constraint_fixed;
3295 if (type->constraints) {
3296 for_each_event_constraint(c, type->constraints) {
3297 if ((event->hw.config & c->cmask) == c->code)
3302 return &type->unconstrainted;
3305 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
3307 if (box->pmu->type->ops->put_constraint)
3308 box->pmu->type->ops->put_constraint(box, event);
3311 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
3313 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
3314 struct event_constraint *c;
3315 int i, wmin, wmax, ret = 0;
3316 struct hw_perf_event *hwc;
3318 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
3320 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
3321 hwc = &box->event_list[i]->hw;
3322 c = uncore_get_event_constraint(box, box->event_list[i]);
3323 hwc->constraint = c;
3324 wmin = min(wmin, c->weight);
3325 wmax = max(wmax, c->weight);
3328 /* fastpath, try to reuse previous register */
3329 for (i = 0; i < n; i++) {
3330 hwc = &box->event_list[i]->hw;
3331 c = hwc->constraint;
3333 /* never assigned */
3337 /* constraint still honored */
3338 if (!test_bit(hwc->idx, c->idxmsk))
3341 /* not already used */
3342 if (test_bit(hwc->idx, used_mask))
3345 __set_bit(hwc->idx, used_mask);
3347 assign[i] = hwc->idx;
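/*
 * (Note added for clarity: when the fastpath above cannot keep every event
 * on its previous counter, perf_assign_events() below performs a full
 * constraint-aware reassignment across the box's counters.)
 */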
3351 ret = perf_assign_events(box->event_list, n,
3352 wmin, wmax, assign);
3354 if (!assign || ret) {
3355 for (i = 0; i < n; i++)
3356 uncore_put_event_constraint(box, box->event_list[i]);
3358 return ret ? -EINVAL : 0;
3361 static void uncore_pmu_event_start(struct perf_event *event, int flags)
3363 struct intel_uncore_box *box = uncore_event_to_box(event);
3364 int idx = event->hw.idx;
3366 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
3369 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
3372 event->hw.state = 0;
3373 box->events[idx] = event;
3375 __set_bit(idx, box->active_mask);
3377 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
3378 uncore_enable_event(box, event);
3380 if (box->n_active == 1) {
3381 uncore_enable_box(box);
3382 uncore_pmu_start_hrtimer(box);
3386 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
3388 struct intel_uncore_box *box = uncore_event_to_box(event);
3389 struct hw_perf_event *hwc = &event->hw;
3391 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
3392 uncore_disable_event(box, event);
3394 box->events[hwc->idx] = NULL;
3395 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3396 hwc->state |= PERF_HES_STOPPED;
3398 if (box->n_active == 0) {
3399 uncore_disable_box(box);
3400 uncore_pmu_cancel_hrtimer(box);
3404 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
3406 * Drain the remaining delta count out of an event
3407 * that we are disabling:
3409 uncore_perf_event_update(box, event);
3410 hwc->state |= PERF_HES_UPTODATE;
3414 static int uncore_pmu_event_add(struct perf_event *event, int flags)
3416 struct intel_uncore_box *box = uncore_event_to_box(event);
3417 struct hw_perf_event *hwc = &event->hw;
3418 int assign[UNCORE_PMC_IDX_MAX];
3424 ret = n = uncore_collect_events(box, event, false);
3428 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3429 if (!(flags & PERF_EF_START))
3430 hwc->state |= PERF_HES_ARCH;
3432 ret = uncore_assign_events(box, assign, n);
3436 /* save events moving to new counters */
3437 for (i = 0; i < box->n_events; i++) {
3438 event = box->event_list[i];
3441 if (hwc->idx == assign[i] &&
3442 hwc->last_tag == box->tags[assign[i]])
3445 * Ensure we don't accidentally enable a stopped
3446 * counter simply because we rescheduled.
3448 if (hwc->state & PERF_HES_STOPPED)
3449 hwc->state |= PERF_HES_ARCH;
3451 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3454 /* reprogram moved events into new counters */
3455 for (i = 0; i < n; i++) {
3456 event = box->event_list[i];
3459 if (hwc->idx != assign[i] ||
3460 hwc->last_tag != box->tags[assign[i]])
3461 uncore_assign_hw_event(box, event, assign[i]);
3462 else if (i < box->n_events)
3465 if (hwc->state & PERF_HES_ARCH)
3468 uncore_pmu_event_start(event, 0);
3475 static void uncore_pmu_event_del(struct perf_event *event, int flags)
3477 struct intel_uncore_box *box = uncore_event_to_box(event);
3480 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3482 for (i = 0; i < box->n_events; i++) {
3483 if (event == box->event_list[i]) {
3484 uncore_put_event_constraint(box, event);
3486 while (++i < box->n_events)
3487 box->event_list[i - 1] = box->event_list[i];
3495 event->hw.last_tag = ~0ULL;
3498 static void uncore_pmu_event_read(struct perf_event *event)
3500 struct intel_uncore_box *box = uncore_event_to_box(event);
3501 uncore_perf_event_update(box, event);
3505 * validation ensures the group can be loaded onto the
3506 * PMU if it was the only group available.
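*
* (Summary note, added for clarity: a throwaway "fake" box is allocated,
* the leader's existing siblings plus the new event are collected into it,
* and uncore_assign_events() is dry-run with a NULL assign array purely to
* check that a valid counter assignment exists.)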
3508 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3509 struct perf_event *event)
3511 struct perf_event *leader = event->group_leader;
3512 struct intel_uncore_box *fake_box;
3513 int ret = -EINVAL, n;
3515 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3519 fake_box->pmu = pmu;
3521 * the event is not yet connected with its
3522 * siblings therefore we must first collect
3523 * existing siblings, then add the new event
3524 * before we can simulate the scheduling
3526 n = uncore_collect_events(fake_box, leader, true);
3530 fake_box->n_events = n;
3531 n = uncore_collect_events(fake_box, event, false);
3535 fake_box->n_events = n;
3537 ret = uncore_assign_events(fake_box, NULL, n);
3543 static int uncore_pmu_event_init(struct perf_event *event)
3545 struct intel_uncore_pmu *pmu;
3546 struct intel_uncore_box *box;
3547 struct hw_perf_event *hwc = &event->hw;
3550 if (event->attr.type != event->pmu->type)
3553 pmu = uncore_event_to_pmu(event);
3554 /* no device found for this pmu */
3555 if (pmu->func_id < 0)
3559 * The uncore PMU measures at all privilege levels all the time.
3560 * So it doesn't make sense to specify any exclude bits.
3562 if (event->attr.exclude_user || event->attr.exclude_kernel ||
3563 event->attr.exclude_hv || event->attr.exclude_idle)
3566 /* Sampling not supported yet */
3567 if (hwc->sample_period)
3571 * Place all uncore events for a particular physical package onto a single cpu.
3576 box = uncore_pmu_to_box(pmu, event->cpu);
3577 if (!box || box->cpu < 0)
3579 event->cpu = box->cpu;
3582 event->hw.last_tag = ~0ULL;
3583 event->hw.extra_reg.idx = EXTRA_REG_NONE;
3584 event->hw.branch_reg.idx = EXTRA_REG_NONE;
3586 if (event->attr.config == UNCORE_FIXED_EVENT) {
3587 /* no fixed counter */
3588 if (!pmu->type->fixed_ctl)
3591 * if there is only one fixed counter, only the first pmu
3592 * can access the fixed counter
3594 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3597 /* fixed counters have event field hardcoded to zero */
3600 hwc->config = event->attr.config & pmu->type->event_mask;
3601 if (pmu->type->ops->hw_config) {
3602 ret = pmu->type->ops->hw_config(box, event);
3608 if (event->group_leader != event)
3609 ret = uncore_validate_group(pmu, event);
3616 static ssize_t uncore_get_attr_cpumask(struct device *dev,
3617 struct device_attribute *attr, char *buf)
3619 int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3626 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3628 static struct attribute *uncore_pmu_attrs[] = {
3629 &dev_attr_cpumask.attr,
3633 static struct attribute_group uncore_pmu_attr_group = {
3634 .attrs = uncore_pmu_attrs,
3637 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3641 if (!pmu->type->pmu) {
3642 pmu->pmu = (struct pmu) {
3643 .attr_groups = pmu->type->attr_groups,
3644 .task_ctx_nr = perf_invalid_context,
3645 .event_init = uncore_pmu_event_init,
3646 .add = uncore_pmu_event_add,
3647 .del = uncore_pmu_event_del,
3648 .start = uncore_pmu_event_start,
3649 .stop = uncore_pmu_event_stop,
3650 .read = uncore_pmu_event_read,
3653 pmu->pmu = *pmu->type->pmu;
3654 pmu->pmu.attr_groups = pmu->type->attr_groups;
3657 if (pmu->type->num_boxes == 1) {
3658 if (strlen(pmu->type->name) > 0)
3659 sprintf(pmu->name, "uncore_%s", pmu->type->name);
3661 sprintf(pmu->name, "uncore");
3663 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3667 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3671 static void __init uncore_type_exit(struct intel_uncore_type *type)
3675 for (i = 0; i < type->num_boxes; i++)
3676 free_percpu(type->pmus[i].box);
3679 kfree(type->events_group);
3680 type->events_group = NULL;
3683 static void __init uncore_types_exit(struct intel_uncore_type **types)
3686 for (i = 0; types[i]; i++)
3687 uncore_type_exit(types[i]);
3690 static int __init uncore_type_init(struct intel_uncore_type *type)
3692 struct intel_uncore_pmu *pmus;
3693 struct attribute_group *attr_group;
3694 struct attribute **attrs;
3697 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3701 type->unconstrainted = (struct event_constraint)
3702 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
3703 0, type->num_counters, 0, 0);
3705 for (i = 0; i < type->num_boxes; i++) {
3706 pmus[i].func_id = -1;
3707 pmus[i].pmu_idx = i;
3708 pmus[i].type = type;
3709 INIT_LIST_HEAD(&pmus[i].box_list);
3710 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3715 if (type->event_descs) {
3717 while (type->event_descs[i].attr.attr.name)
3720 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3721 sizeof(*attr_group), GFP_KERNEL);
3725 attrs = (struct attribute **)(attr_group + 1);
3726 attr_group->name = "events";
3727 attr_group->attrs = attrs;
3729 for (j = 0; j < i; j++)
3730 attrs[j] = &type->event_descs[j].attr.attr;
3732 type->events_group = attr_group;
3735 type->pmu_group = &uncore_pmu_attr_group;
3739 uncore_type_exit(type);
3743 static int __init uncore_types_init(struct intel_uncore_type **types)
3747 for (i = 0; types[i]; i++) {
3748 ret = uncore_type_init(types[i]);
3755 uncore_type_exit(types[i]);
3759 static struct pci_driver *uncore_pci_driver;
3760 static bool pcidrv_registered;
3763 * add a pci uncore device
3765 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3767 struct intel_uncore_pmu *pmu;
3768 struct intel_uncore_box *box;
3769 struct intel_uncore_type *type;
3772 phys_id = pcibus_to_physid[pdev->bus->number];
3776 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3777 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3778 pci_set_drvdata(pdev, NULL);
3782 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3783 box = uncore_alloc_box(type, NUMA_NO_NODE);
3788 * for a performance monitoring unit with multiple boxes,
3789 * each box has a different function id.
3791 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3792 if (pmu->func_id < 0)
3793 pmu->func_id = pdev->devfn;
3795 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
3797 box->phys_id = phys_id;
3798 box->pci_dev = pdev;
3800 uncore_box_init(box);
3801 pci_set_drvdata(pdev, box);
3803 raw_spin_lock(&uncore_box_lock);
3804 list_add_tail(&box->list, &pmu->box_list);
3805 raw_spin_unlock(&uncore_box_lock);
3810 static void uncore_pci_remove(struct pci_dev *pdev)
3812 struct intel_uncore_box *box = pci_get_drvdata(pdev);
3813 struct intel_uncore_pmu *pmu;
3814 int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3816 box = pci_get_drvdata(pdev);
3818 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3819 if (extra_pci_dev[phys_id][i] == pdev) {
3820 extra_pci_dev[phys_id][i] = NULL;
3824 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3829 if (WARN_ON_ONCE(phys_id != box->phys_id))
3832 pci_set_drvdata(pdev, NULL);
3834 raw_spin_lock(&uncore_box_lock);
3835 list_del(&box->list);
3836 raw_spin_unlock(&uncore_box_lock);
3838 for_each_possible_cpu(cpu) {
3839 if (*per_cpu_ptr(pmu->box, cpu) == box) {
3840 *per_cpu_ptr(pmu->box, cpu) = NULL;
3841 atomic_dec(&box->refcnt);
3845 WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
3849 static int __init uncore_pci_init(void)
3853 switch (boot_cpu_data.x86_model) {
3854 case 45: /* Sandy Bridge-EP */
3855 ret = snbep_pci2phy_map_init(0x3ce0);
3858 pci_uncores = snbep_pci_uncores;
3859 uncore_pci_driver = &snbep_uncore_pci_driver;
3861 case 62: /* IvyTown */
3862 ret = snbep_pci2phy_map_init(0x0e1e);
3865 pci_uncores = ivt_pci_uncores;
3866 uncore_pci_driver = &ivt_uncore_pci_driver;
3868 case 42: /* Sandy Bridge */
3869 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
3872 pci_uncores = snb_pci_uncores;
3873 uncore_pci_driver = &snb_uncore_pci_driver;
3875 case 58: /* Ivy Bridge */
3876 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
3879 pci_uncores = snb_pci_uncores;
3880 uncore_pci_driver = &ivb_uncore_pci_driver;
3882 case 60: /* Haswell */
3883 case 69: /* Haswell Celeron */
3884 ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
3887 pci_uncores = snb_pci_uncores;
3888 uncore_pci_driver = &hsw_uncore_pci_driver;
3894 ret = uncore_types_init(pci_uncores);
3898 uncore_pci_driver->probe = uncore_pci_probe;
3899 uncore_pci_driver->remove = uncore_pci_remove;
3901 ret = pci_register_driver(uncore_pci_driver);
3903 pcidrv_registered = true;
3905 uncore_types_exit(pci_uncores);
3910 static void __init uncore_pci_exit(void)
3912 if (pcidrv_registered) {
3913 pcidrv_registered = false;
3914 pci_unregister_driver(uncore_pci_driver);
3915 uncore_types_exit(pci_uncores);
3919 /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3920 static LIST_HEAD(boxes_to_free);
3922 static void uncore_kfree_boxes(void)
3924 struct intel_uncore_box *box;
3926 while (!list_empty(&boxes_to_free)) {
3927 box = list_entry(boxes_to_free.next,
3928 struct intel_uncore_box, list);
3929 list_del(&box->list);
3934 static void uncore_cpu_dying(int cpu)
3936 struct intel_uncore_type *type;
3937 struct intel_uncore_pmu *pmu;
3938 struct intel_uncore_box *box;
3941 for (i = 0; msr_uncores[i]; i++) {
3942 type = msr_uncores[i];
3943 for (j = 0; j < type->num_boxes; j++) {
3944 pmu = &type->pmus[j];
3945 box = *per_cpu_ptr(pmu->box, cpu);
3946 *per_cpu_ptr(pmu->box, cpu) = NULL;
3947 if (box && atomic_dec_and_test(&box->refcnt))
3948 list_add(&box->list, &boxes_to_free);
3953 static int uncore_cpu_starting(int cpu)
3955 struct intel_uncore_type *type;
3956 struct intel_uncore_pmu *pmu;
3957 struct intel_uncore_box *box, *exist;
3958 int i, j, k, phys_id;
3960 phys_id = topology_physical_package_id(cpu);
3962 for (i = 0; msr_uncores[i]; i++) {
3963 type = msr_uncores[i];
3964 for (j = 0; j < type->num_boxes; j++) {
3965 pmu = &type->pmus[j];
3966 box = *per_cpu_ptr(pmu->box, cpu);
3967 /* called by uncore_cpu_init? */
3968 if (box && box->phys_id >= 0) {
3969 uncore_box_init(box);
3973 for_each_online_cpu(k) {
3974 exist = *per_cpu_ptr(pmu->box, k);
3975 if (exist && exist->phys_id == phys_id) {
3976 atomic_inc(&exist->refcnt);
3977 *per_cpu_ptr(pmu->box, cpu) = exist;
3979 list_add(&box->list,
3988 box->phys_id = phys_id;
3989 uncore_box_init(box);
3996 static int uncore_cpu_prepare(int cpu, int phys_id)
3998 struct intel_uncore_type *type;
3999 struct intel_uncore_pmu *pmu;
4000 struct intel_uncore_box *box;
4003 for (i = 0; msr_uncores[i]; i++) {
4004 type = msr_uncores[i];
4005 for (j = 0; j < type->num_boxes; j++) {
4006 pmu = &type->pmus[j];
4007 if (pmu->func_id < 0)
4010 box = uncore_alloc_box(type, cpu_to_node(cpu));
4015 box->phys_id = phys_id;
4016 *per_cpu_ptr(pmu->box, cpu) = box;
4023 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
4025 struct intel_uncore_type *type;
4026 struct intel_uncore_pmu *pmu;
4027 struct intel_uncore_box *box;
4030 for (i = 0; uncores[i]; i++) {
4032 for (j = 0; j < type->num_boxes; j++) {
4033 pmu = &type->pmus[j];
4035 box = uncore_pmu_to_box(pmu, new_cpu);
4037 box = uncore_pmu_to_box(pmu, old_cpu);
4042 WARN_ON_ONCE(box->cpu != -1);
4047 WARN_ON_ONCE(box->cpu != old_cpu);
4049 uncore_pmu_cancel_hrtimer(box);
4050 perf_pmu_migrate_context(&pmu->pmu,
4060 static void uncore_event_exit_cpu(int cpu)
4062 int i, phys_id, target;
4064 /* if exiting cpu is used for collecting uncore events */
4065 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
4068 /* find a new cpu to collect uncore events */
4069 phys_id = topology_physical_package_id(cpu);
4071 for_each_online_cpu(i) {
4074 if (phys_id == topology_physical_package_id(i)) {
4080 /* migrate uncore events to the new cpu */
4082 cpumask_set_cpu(target, &uncore_cpu_mask);
4084 uncore_change_context(msr_uncores, cpu, target);
4085 uncore_change_context(pci_uncores, cpu, target);
4088 static void uncore_event_init_cpu(int cpu)
4092 phys_id = topology_physical_package_id(cpu);
4093 for_each_cpu(i, &uncore_cpu_mask) {
4094 if (phys_id == topology_physical_package_id(i))
4098 cpumask_set_cpu(cpu, &uncore_cpu_mask);
4100 uncore_change_context(msr_uncores, -1, cpu);
4101 uncore_change_context(pci_uncores, -1, cpu);
4104 static int uncore_cpu_notifier(struct notifier_block *self,
4105 unsigned long action, void *hcpu)
4107 unsigned int cpu = (long)hcpu;
4109 /* allocate/free data structure for uncore box */
4110 switch (action & ~CPU_TASKS_FROZEN) {
4111 case CPU_UP_PREPARE:
4112 uncore_cpu_prepare(cpu, -1);
4115 uncore_cpu_starting(cpu);
4117 case CPU_UP_CANCELED:
4119 uncore_cpu_dying(cpu);
4123 uncore_kfree_boxes();
4129 /* select the cpu that collects uncore events */
4130 switch (action & ~CPU_TASKS_FROZEN) {
4131 case CPU_DOWN_FAILED:
4133 uncore_event_init_cpu(cpu);
4135 case CPU_DOWN_PREPARE:
4136 uncore_event_exit_cpu(cpu);
4145 static struct notifier_block uncore_cpu_nb = {
4146 .notifier_call = uncore_cpu_notifier,
4148 * to migrate uncore events, our notifier should be executed
4149 * before perf core's notifier.
4151 .priority = CPU_PRI_PERF + 1,
4154 static void __init uncore_cpu_setup(void *dummy)
4156 uncore_cpu_starting(smp_processor_id());
4159 static int __init uncore_cpu_init(void)
4163 max_cores = boot_cpu_data.x86_max_cores;
4164 switch (boot_cpu_data.x86_model) {
4165 case 26: /* Nehalem */
4167 case 37: /* Westmere */
4169 msr_uncores = nhm_msr_uncores;
4171 case 42: /* Sandy Bridge */
4172 case 58: /* Ivy Bridge */
4173 if (snb_uncore_cbox.num_boxes > max_cores)
4174 snb_uncore_cbox.num_boxes = max_cores;
4175 msr_uncores = snb_msr_uncores;
4177 case 45: /* Sandy Bridge-EP */
4178 if (snbep_uncore_cbox.num_boxes > max_cores)
4179 snbep_uncore_cbox.num_boxes = max_cores;
4180 msr_uncores = snbep_msr_uncores;
4182 case 46: /* Nehalem-EX */
4183 uncore_nhmex = true;
4184 case 47: /* Westmere-EX aka. Xeon E7 */
4186 nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
4187 if (nhmex_uncore_cbox.num_boxes > max_cores)
4188 nhmex_uncore_cbox.num_boxes = max_cores;
4189 msr_uncores = nhmex_msr_uncores;
4191 case 62: /* IvyTown */
4192 if (ivt_uncore_cbox.num_boxes > max_cores)
4193 ivt_uncore_cbox.num_boxes = max_cores;
4194 msr_uncores = ivt_msr_uncores;
4201 ret = uncore_types_init(msr_uncores);
4208 static int __init uncore_pmus_register(void)
4210 struct intel_uncore_pmu *pmu;
4211 struct intel_uncore_type *type;
4214 for (i = 0; msr_uncores[i]; i++) {
4215 type = msr_uncores[i];
4216 for (j = 0; j < type->num_boxes; j++) {
4217 pmu = &type->pmus[j];
4218 uncore_pmu_register(pmu);
4222 for (i = 0; pci_uncores[i]; i++) {
4223 type = pci_uncores[i];
4224 for (j = 0; j < type->num_boxes; j++) {
4225 pmu = &type->pmus[j];
4226 uncore_pmu_register(pmu);
4233 static void __init uncore_cpumask_init(void)
4238 * only invoked once, from the msr or pci init code
4240 if (!cpumask_empty(&uncore_cpu_mask))
4245 for_each_online_cpu(cpu) {
4246 int i, phys_id = topology_physical_package_id(cpu);
4248 for_each_cpu(i, &uncore_cpu_mask) {
4249 if (phys_id == topology_physical_package_id(i)) {
4257 uncore_cpu_prepare(cpu, phys_id);
4258 uncore_event_init_cpu(cpu);
4260 on_each_cpu(uncore_cpu_setup, NULL, 1);
4262 register_cpu_notifier(&uncore_cpu_nb);
4268 static int __init intel_uncore_init(void)
4272 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
4275 if (cpu_has_hypervisor)
4278 ret = uncore_pci_init();
4281 ret = uncore_cpu_init();
4286 uncore_cpumask_init();
4288 uncore_pmus_register();
4293 device_initcall(intel_uncore_init);