1 #ifdef CONFIG_CPU_SUP_INTEL
3 #define MAX_EXTRA_REGS 2
9 int ref; /* reference count */
10 unsigned int extra_reg; /* extra MSR number */
11 u64 extra_config; /* extra MSR config */
16 * This is used to coordinate shared registers for HT threads.
18 struct intel_percore {
19 raw_spinlock_t lock; /* protect structure */
20 struct er_account regs[MAX_EXTRA_REGS];
21 int refcnt; /* number of threads */
26 * Intel PerfMon, used on Core and later.
28 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
30 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
31 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
32 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
33 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
34 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
35 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
36 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
39 static struct event_constraint intel_core_event_constraints[] =
41 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
42 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
43 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
44 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
45 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
46 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
50 static struct event_constraint intel_core2_event_constraints[] =
52 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
53 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
55 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
56 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
57 * ratio between these counters.
59 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
60 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
61 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
62 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
63 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
64 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
65 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
66 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
67 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
68 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
69 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
73 static struct event_constraint intel_nehalem_event_constraints[] =
75 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
76 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
77 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
78 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
79 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
80 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
81 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
82 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
83 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
84 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
85 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
89 static struct extra_reg intel_nehalem_extra_regs[] =
91 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
95 static struct event_constraint intel_nehalem_percore_constraints[] =
97 INTEL_EVENT_CONSTRAINT(0xb7, 0),
101 static struct event_constraint intel_westmere_event_constraints[] =
103 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
104 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
105 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
106 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
107 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
108 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
109 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
113 static struct event_constraint intel_snb_event_constraints[] =
115 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
116 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
117 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
118 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
119 INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
120 INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
121 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
122 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
126 static struct extra_reg intel_westmere_extra_regs[] =
128 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
129 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
133 static struct event_constraint intel_westmere_percore_constraints[] =
135 INTEL_EVENT_CONSTRAINT(0xb7, 0),
136 INTEL_EVENT_CONSTRAINT(0xbb, 0),
140 static struct event_constraint intel_gen_event_constraints[] =
142 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
143 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
144 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
148 static u64 intel_pmu_event_map(int hw_event)
150 return intel_perfmon_event_map[hw_event];
153 static __initconst const u64 snb_hw_cache_event_ids
154 [PERF_COUNT_HW_CACHE_MAX]
155 [PERF_COUNT_HW_CACHE_OP_MAX]
156 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
160 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
161 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
164 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
165 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
167 [ C(OP_PREFETCH) ] = {
168 [ C(RESULT_ACCESS) ] = 0x0,
169 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
174 [ C(RESULT_ACCESS) ] = 0x0,
175 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
178 [ C(RESULT_ACCESS) ] = -1,
179 [ C(RESULT_MISS) ] = -1,
181 [ C(OP_PREFETCH) ] = {
182 [ C(RESULT_ACCESS) ] = 0x0,
183 [ C(RESULT_MISS) ] = 0x0,
188 * TBD: Need Off-core Response Performance Monitoring support
191 /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
192 [ C(RESULT_ACCESS) ] = 0x01b7,
193 /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
194 [ C(RESULT_MISS) ] = 0x01bb,
197 /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
198 [ C(RESULT_ACCESS) ] = 0x01b7,
199 /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
200 [ C(RESULT_MISS) ] = 0x01bb,
202 [ C(OP_PREFETCH) ] = {
203 /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
204 [ C(RESULT_ACCESS) ] = 0x01b7,
205 /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
206 [ C(RESULT_MISS) ] = 0x01bb,
211 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
212 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
215 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
216 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
218 [ C(OP_PREFETCH) ] = {
219 [ C(RESULT_ACCESS) ] = 0x0,
220 [ C(RESULT_MISS) ] = 0x0,
225 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
226 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
229 [ C(RESULT_ACCESS) ] = -1,
230 [ C(RESULT_MISS) ] = -1,
232 [ C(OP_PREFETCH) ] = {
233 [ C(RESULT_ACCESS) ] = -1,
234 [ C(RESULT_MISS) ] = -1,
239 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
240 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
243 [ C(RESULT_ACCESS) ] = -1,
244 [ C(RESULT_MISS) ] = -1,
246 [ C(OP_PREFETCH) ] = {
247 [ C(RESULT_ACCESS) ] = -1,
248 [ C(RESULT_MISS) ] = -1,
253 static __initconst const u64 westmere_hw_cache_event_ids
254 [PERF_COUNT_HW_CACHE_MAX]
255 [PERF_COUNT_HW_CACHE_OP_MAX]
256 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
260 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
261 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
264 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
265 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
267 [ C(OP_PREFETCH) ] = {
268 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
269 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
274 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
275 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
278 [ C(RESULT_ACCESS) ] = -1,
279 [ C(RESULT_MISS) ] = -1,
281 [ C(OP_PREFETCH) ] = {
282 [ C(RESULT_ACCESS) ] = 0x0,
283 [ C(RESULT_MISS) ] = 0x0,
288 /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
289 [ C(RESULT_ACCESS) ] = 0x01b7,
290 /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
291 [ C(RESULT_MISS) ] = 0x01bb,
294 * Use RFO, not WRITEBACK, because a write miss would typically occur
298 /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */
299 [ C(RESULT_ACCESS) ] = 0x01bb,
300 /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */
301 [ C(RESULT_MISS) ] = 0x01b7,
303 [ C(OP_PREFETCH) ] = {
304 /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
305 [ C(RESULT_ACCESS) ] = 0x01b7,
306 /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
307 [ C(RESULT_MISS) ] = 0x01bb,
312 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
313 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
316 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
317 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
319 [ C(OP_PREFETCH) ] = {
320 [ C(RESULT_ACCESS) ] = 0x0,
321 [ C(RESULT_MISS) ] = 0x0,
326 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
327 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
330 [ C(RESULT_ACCESS) ] = -1,
331 [ C(RESULT_MISS) ] = -1,
333 [ C(OP_PREFETCH) ] = {
334 [ C(RESULT_ACCESS) ] = -1,
335 [ C(RESULT_MISS) ] = -1,
340 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
341 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
344 [ C(RESULT_ACCESS) ] = -1,
345 [ C(RESULT_MISS) ] = -1,
347 [ C(OP_PREFETCH) ] = {
348 [ C(RESULT_ACCESS) ] = -1,
349 [ C(RESULT_MISS) ] = -1,
355 * OFFCORE_RESPONSE MSR bits (subset), see IA32 SDM Vol 3 30.6.1.3
358 #define DMND_DATA_RD (1 << 0)
359 #define DMND_RFO (1 << 1)
360 #define DMND_WB (1 << 3)
361 #define PF_DATA_RD (1 << 4)
362 #define PF_DATA_RFO (1 << 5)
363 #define RESP_UNCORE_HIT (1 << 8)
364 #define RESP_MISS (0xf600) /* non uncore hit */
366 static __initconst const u64 nehalem_hw_cache_extra_regs
367 [PERF_COUNT_HW_CACHE_MAX]
368 [PERF_COUNT_HW_CACHE_OP_MAX]
369 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
373 [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT,
374 [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS,
377 [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT,
378 [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS,
380 [ C(OP_PREFETCH) ] = {
381 [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT,
382 [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS,
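/*
 * Worked example (illustrative only, not used by the code): for an LLC
 * demand-load "access" the core event stays OFFCORE_RESPONSE (0x01b7),
 * and the value programmed into the extra MSR is
 * DMND_DATA_RD|RESP_UNCORE_HIT = (1 << 0) | (1 << 8) = 0x0101, while the
 * "miss" flavour uses DMND_DATA_RD|RESP_MISS = 0x0001 | 0xf600 = 0xf601.
 */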
387 static __initconst const u64 nehalem_hw_cache_event_ids
388 [PERF_COUNT_HW_CACHE_MAX]
389 [PERF_COUNT_HW_CACHE_OP_MAX]
390 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
394 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
395 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
398 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
399 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
401 [ C(OP_PREFETCH) ] = {
402 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
403 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
408 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
409 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
412 [ C(RESULT_ACCESS) ] = -1,
413 [ C(RESULT_MISS) ] = -1,
415 [ C(OP_PREFETCH) ] = {
416 [ C(RESULT_ACCESS) ] = 0x0,
417 [ C(RESULT_MISS) ] = 0x0,
422 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
423 [ C(RESULT_ACCESS) ] = 0x01b7,
424 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
425 [ C(RESULT_MISS) ] = 0x01b7,
428 * Use RFO, not WRITEBACK, because a write miss would typically occur
432 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
433 [ C(RESULT_ACCESS) ] = 0x01b7,
434 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
435 [ C(RESULT_MISS) ] = 0x01b7,
437 [ C(OP_PREFETCH) ] = {
438 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
439 [ C(RESULT_ACCESS) ] = 0x01b7,
440 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
441 [ C(RESULT_MISS) ] = 0x01b7,
446 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
447 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
450 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
451 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
453 [ C(OP_PREFETCH) ] = {
454 [ C(RESULT_ACCESS) ] = 0x0,
455 [ C(RESULT_MISS) ] = 0x0,
460 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
461 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
464 [ C(RESULT_ACCESS) ] = -1,
465 [ C(RESULT_MISS) ] = -1,
467 [ C(OP_PREFETCH) ] = {
468 [ C(RESULT_ACCESS) ] = -1,
469 [ C(RESULT_MISS) ] = -1,
474 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
475 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
478 [ C(RESULT_ACCESS) ] = -1,
479 [ C(RESULT_MISS) ] = -1,
481 [ C(OP_PREFETCH) ] = {
482 [ C(RESULT_ACCESS) ] = -1,
483 [ C(RESULT_MISS) ] = -1,
488 static __initconst const u64 core2_hw_cache_event_ids
489 [PERF_COUNT_HW_CACHE_MAX]
490 [PERF_COUNT_HW_CACHE_OP_MAX]
491 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
495 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
496 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
499 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
500 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
502 [ C(OP_PREFETCH) ] = {
503 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
504 [ C(RESULT_MISS) ] = 0,
509 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
510 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
513 [ C(RESULT_ACCESS) ] = -1,
514 [ C(RESULT_MISS) ] = -1,
516 [ C(OP_PREFETCH) ] = {
517 [ C(RESULT_ACCESS) ] = 0,
518 [ C(RESULT_MISS) ] = 0,
523 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
524 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
527 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
528 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
530 [ C(OP_PREFETCH) ] = {
531 [ C(RESULT_ACCESS) ] = 0,
532 [ C(RESULT_MISS) ] = 0,
537 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
538 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
541 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
542 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
544 [ C(OP_PREFETCH) ] = {
545 [ C(RESULT_ACCESS) ] = 0,
546 [ C(RESULT_MISS) ] = 0,
551 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
552 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
555 [ C(RESULT_ACCESS) ] = -1,
556 [ C(RESULT_MISS) ] = -1,
558 [ C(OP_PREFETCH) ] = {
559 [ C(RESULT_ACCESS) ] = -1,
560 [ C(RESULT_MISS) ] = -1,
565 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
566 [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
569 [ C(RESULT_ACCESS) ] = -1,
570 [ C(RESULT_MISS) ] = -1,
572 [ C(OP_PREFETCH) ] = {
573 [ C(RESULT_ACCESS) ] = -1,
574 [ C(RESULT_MISS) ] = -1,
579 static __initconst const u64 atom_hw_cache_event_ids
580 [PERF_COUNT_HW_CACHE_MAX]
581 [PERF_COUNT_HW_CACHE_OP_MAX]
582 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
586 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
587 [ C(RESULT_MISS) ] = 0,
590 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
591 [ C(RESULT_MISS) ] = 0,
593 [ C(OP_PREFETCH) ] = {
594 [ C(RESULT_ACCESS) ] = 0x0,
595 [ C(RESULT_MISS) ] = 0,
600 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
601 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
604 [ C(RESULT_ACCESS) ] = -1,
605 [ C(RESULT_MISS) ] = -1,
607 [ C(OP_PREFETCH) ] = {
608 [ C(RESULT_ACCESS) ] = 0,
609 [ C(RESULT_MISS) ] = 0,
614 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
615 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
618 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
619 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
621 [ C(OP_PREFETCH) ] = {
622 [ C(RESULT_ACCESS) ] = 0,
623 [ C(RESULT_MISS) ] = 0,
628 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
629 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
632 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
633 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
635 [ C(OP_PREFETCH) ] = {
636 [ C(RESULT_ACCESS) ] = 0,
637 [ C(RESULT_MISS) ] = 0,
642 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
643 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
646 [ C(RESULT_ACCESS) ] = -1,
647 [ C(RESULT_MISS) ] = -1,
649 [ C(OP_PREFETCH) ] = {
650 [ C(RESULT_ACCESS) ] = -1,
651 [ C(RESULT_MISS) ] = -1,
656 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
657 [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
660 [ C(RESULT_ACCESS) ] = -1,
661 [ C(RESULT_MISS) ] = -1,
663 [ C(OP_PREFETCH) ] = {
664 [ C(RESULT_ACCESS) ] = -1,
665 [ C(RESULT_MISS) ] = -1,
670 static void intel_pmu_disable_all(void)
672 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
674 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
676 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
677 intel_pmu_disable_bts();
679 intel_pmu_pebs_disable_all();
680 intel_pmu_lbr_disable_all();
683 static void intel_pmu_enable_all(int added)
685 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
687 intel_pmu_pebs_enable_all();
688 intel_pmu_lbr_enable_all();
689 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
691 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
692 struct perf_event *event =
693 cpuc->events[X86_PMC_IDX_FIXED_BTS];
695 if (WARN_ON_ONCE(!event))
698 intel_pmu_enable_bts(event->hw.config);
704 * Intel Errata AAK100 (model 26)
705 * Intel Errata AAP53 (model 30)
706 * Intel Errata BD53 (model 44)
708 * The official story:
709 * These chips need to be 'reset' when adding counters by programming the
710 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
711 * in sequence on the same PMC or on different PMCs.
713 * In practice it appears some of these events do in fact count, and
714 * we need to program all 4 events.
716 static void intel_pmu_nhm_workaround(void)
718 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
719 static const unsigned long nhm_magic[4] = {
725 struct perf_event *event;
729 * The erratum requires the following steps:
730 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
731 * 2) Configure 4 PERFEVTSELx with the magic events and clear
732 * the corresponding PMCx;
733 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
734 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
735 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
739 * The real steps we choose are a little different from above.
740 * A) To reduce MSR operations, we don't run step 1) as they
741 * are already cleared before this function is called;
742 * B) Call x86_perf_event_update to save PMCx before configuring
743 * PERFEVTSELx with magic number;
744 * C) With step 5), we clear a PERFEVTSELx only when it is
745 * not currently in use.
746 * D) Call x86_perf_event_set_period to restore PMCx;
749 /* We always operate on 4 pairs of PERF counters */
750 for (i = 0; i < 4; i++) {
751 event = cpuc->events[i];
753 x86_perf_event_update(event);
756 for (i = 0; i < 4; i++) {
757 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
758 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
761 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
762 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
764 for (i = 0; i < 4; i++) {
765 event = cpuc->events[i];
768 x86_perf_event_set_period(event);
769 __x86_pmu_enable_event(&event->hw,
770 ARCH_PERFMON_EVENTSEL_ENABLE);
772 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
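/*
 * For reference (illustrative decode, assuming the architectural
 * PERFEVTSEL layout): each magic value is
 * ARCH_PERFMON_EVENTSEL_ENABLE (bit 22) | ARCH_PERFMON_EVENTSEL_OS (bit 17) |
 * ARCH_PERFMON_EVENTSEL_USR (bit 16) plus the event code, e.g.
 * 0x4300B5 = 0x400000 | 0x20000 | 0x10000 | 0xB5.
 */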
776 static void intel_pmu_nhm_enable_all(int added)
779 intel_pmu_nhm_workaround();
780 intel_pmu_enable_all(added);
783 static inline u64 intel_pmu_get_status(void)
787 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
792 static inline void intel_pmu_ack_status(u64 ack)
794 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
797 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
799 int idx = hwc->idx - X86_PMC_IDX_FIXED;
802 mask = 0xfULL << (idx * 4);
804 rdmsrl(hwc->config_base, ctrl_val);
806 wrmsrl(hwc->config_base, ctrl_val);
809 static void intel_pmu_disable_event(struct perf_event *event)
811 struct hw_perf_event *hwc = &event->hw;
813 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
814 intel_pmu_disable_bts();
815 intel_pmu_drain_bts_buffer();
819 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
820 intel_pmu_disable_fixed(hwc);
824 x86_pmu_disable_event(event);
826 if (unlikely(event->attr.precise_ip))
827 intel_pmu_pebs_disable(event);
830 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
832 int idx = hwc->idx - X86_PMC_IDX_FIXED;
833 u64 ctrl_val, bits, mask;
836 * Enable IRQ generation (0x8),
837 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
841 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
843 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
847 * ANY bit is supported in v3 and up
849 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
853 mask = 0xfULL << (idx * 4);
855 rdmsrl(hwc->config_base, ctrl_val);
858 wrmsrl(hwc->config_base, ctrl_val);
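/*
 * Worked example (illustrative only): for fixed counter 1
 * (CPU_CLK_UNHALTED.CORE) counting both rings with PMI enabled,
 * bits = 0x8 | 0x2 | 0x1 = 0xb and mask = 0xf << 4, so the nibble at
 * bits 4-7 of MSR_ARCH_PERFMON_FIXED_CTR_CTRL ends up as 0xb.
 */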
861 static void intel_pmu_enable_event(struct perf_event *event)
863 struct hw_perf_event *hwc = &event->hw;
865 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
866 if (!__this_cpu_read(cpu_hw_events.enabled))
869 intel_pmu_enable_bts(hwc->config);
873 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
874 intel_pmu_enable_fixed(hwc);
878 if (unlikely(event->attr.precise_ip))
879 intel_pmu_pebs_enable(event);
881 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
885 * Save and restart an expired event. Called by NMI contexts,
886 * so it has to be careful about preempting normal event ops:
888 static int intel_pmu_save_and_restart(struct perf_event *event)
890 x86_perf_event_update(event);
891 return x86_perf_event_set_period(event);
894 static void intel_pmu_reset(void)
896 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
900 if (!x86_pmu.num_counters)
903 local_irq_save(flags);
905 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
907 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
908 checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
909 checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
911 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
912 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
915 ds->bts_index = ds->bts_buffer_base;
917 local_irq_restore(flags);
921 * This handler is triggered by the local APIC, so the APIC IRQ handling
924 static int intel_pmu_handle_irq(struct pt_regs *regs)
926 struct perf_sample_data data;
927 struct cpu_hw_events *cpuc;
932 perf_sample_data_init(&data, 0);
934 cpuc = &__get_cpu_var(cpu_hw_events);
937 * Some chipsets need to unmask the LVTPC in a particular spot
938 * inside the nmi handler. As a result, the unmasking was pushed
939 * into all the nmi handlers.
941 * This handler doesn't seem to have any issues with the unmasking
942 * so it was left at the top.
944 apic_write(APIC_LVTPC, APIC_DM_NMI);
946 intel_pmu_disable_all();
947 handled = intel_pmu_drain_bts_buffer();
948 status = intel_pmu_get_status();
950 intel_pmu_enable_all(0);
956 intel_pmu_ack_status(status);
958 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
959 perf_event_print_debug();
964 inc_irq_stat(apic_perf_irqs);
966 intel_pmu_lbr_read();
969 * PEBS overflow sets bit 62 in the global status register
971 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
973 x86_pmu.drain_pebs(regs);
976 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
977 struct perf_event *event = cpuc->events[bit];
981 if (!test_bit(bit, cpuc->active_mask))
984 if (!intel_pmu_save_and_restart(event))
987 data.period = event->hw.last_period;
989 if (perf_event_overflow(event, 1, &data, regs))
990 x86_pmu_stop(event, 0);
994 * Repeat if there is more work to be done:
996 status = intel_pmu_get_status();
1001 intel_pmu_enable_all(0);
1005 static struct event_constraint *
1006 intel_bts_constraints(struct perf_event *event)
1008 struct hw_perf_event *hwc = &event->hw;
1009 unsigned int hw_event, bts_event;
1011 if (event->attr.freq)
1014 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1015 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1017 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
1018 return &bts_constraint;
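/*
 * Illustrative sketch (userspace view, not part of this file): an event
 * configured roughly as below would take the BTS path above, since it
 * maps to the branch-instructions event, has a period of 1 and is not
 * in frequency mode:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
 *		.sample_period	= 1,
 *	};
 */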
1023 static struct event_constraint *
1024 intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1026 struct hw_perf_event *hwc = &event->hw;
1027 unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
1028 struct event_constraint *c;
1029 struct intel_percore *pc;
1030 struct er_account *era;
1035 if (!x86_pmu.percore_constraints || hwc->extra_alloc)
1038 for (c = x86_pmu.percore_constraints; c->cmask; c++) {
1043 * Allocate resources per core.
1045 pc = cpuc->per_core;
1048 c = &emptyconstraint;
1049 raw_spin_lock(&pc->lock);
1052 for (i = 0; i < MAX_EXTRA_REGS; i++) {
1054 if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
1055 /* Allow sharing same config */
1056 if (hwc->extra_config == era->extra_config) {
1058 cpuc->percore_used = 1;
1059 hwc->extra_alloc = 1;
1065 } else if (era->ref == 0 && free_slot == -1)
1068 if (!found && free_slot != -1) {
1069 era = &pc->regs[free_slot];
1071 era->extra_reg = hwc->extra_reg;
1072 era->extra_config = hwc->extra_config;
1073 cpuc->percore_used = 1;
1074 hwc->extra_alloc = 1;
1077 raw_spin_unlock(&pc->lock);
1084 static struct event_constraint *
1085 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1087 struct event_constraint *c;
1089 c = intel_bts_constraints(event);
1093 c = intel_pebs_constraints(event);
1097 c = intel_percore_constraints(cpuc, event);
1101 return x86_get_event_constraints(cpuc, event);
1104 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1105 struct perf_event *event)
1107 struct extra_reg *er;
1108 struct intel_percore *pc;
1109 struct er_account *era;
1110 struct hw_perf_event *hwc = &event->hw;
1113 if (!cpuc->percore_used)
1116 for (er = x86_pmu.extra_regs; er->msr; er++) {
1117 if (er->event != (hwc->config & er->config_mask))
1120 pc = cpuc->per_core;
1121 raw_spin_lock(&pc->lock);
1122 for (i = 0; i < MAX_EXTRA_REGS; i++) {
1125 era->extra_config == hwc->extra_config &&
1126 era->extra_reg == er->msr) {
1128 hwc->extra_alloc = 0;
1133 for (i = 0; i < MAX_EXTRA_REGS; i++)
1134 allref += pc->regs[i].ref;
1136 cpuc->percore_used = 0;
1137 raw_spin_unlock(&pc->lock);
1142 static int intel_pmu_hw_config(struct perf_event *event)
1144 int ret = x86_pmu_hw_config(event);
1149 if (event->attr.precise_ip &&
1150 (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1152 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1153 * (0x003c) so that we can use it with PEBS.
1155 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1156 * PEBS capable. However we can use INST_RETIRED.ANY_P
1157 * (0x00c0), which is a PEBS capable event, to get the same
1160 * INST_RETIRED.ANY_P counts the number of cycles that retire at least
1161 * CNTMASK instructions. By setting CNTMASK to a value (16)
1162 * larger than the maximum number of instructions that can be
1163 * retired per cycle (4) and then inverting the condition, we
1164 * count all cycles that retire 16 or fewer instructions, which
1167 * Thereby we gain a PEBS capable cycle counter.
1169 u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
1171 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1172 event->hw.config = alt_config;
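/*
 * Illustrative breakdown of the encoding above (assuming the
 * architectural PERFEVTSEL layout):
 *
 *	0x108000c0 = (16 << 24)		cmask = 16
 *		   | (1 << 23)		INV, invert the cmask test
 *		   | 0x00c0		INST_RETIRED.ANY_P
 *
 * i.e. count cycles retiring fewer than 16 instructions, which is
 * every cycle.
 */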
1175 if (event->attr.type != PERF_TYPE_RAW)
1178 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1181 if (x86_pmu.version < 3)
1184 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1187 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
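/*
 * Note (illustrative): ARCH_PERFMON_EVENTSEL_ANY is bit 21 (0x200000),
 * so e.g. a raw config of 0x20003c requests all-thread counting of the
 * unhalted core cycles event (0x3c) and is subject to the checks above.
 */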
1192 static __initconst const struct x86_pmu core_pmu = {
1194 .handle_irq = x86_pmu_handle_irq,
1195 .disable_all = x86_pmu_disable_all,
1196 .enable_all = x86_pmu_enable_all,
1197 .enable = x86_pmu_enable_event,
1198 .disable = x86_pmu_disable_event,
1199 .hw_config = x86_pmu_hw_config,
1200 .schedule_events = x86_schedule_events,
1201 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1202 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1203 .event_map = intel_pmu_event_map,
1204 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1207 * Intel PMCs cannot be accessed sanely above 32 bit width,
1208 * so we install an artificial 1<<31 period regardless of
1209 * the generic event period:
1211 .max_period = (1ULL << 31) - 1,
1212 .get_event_constraints = intel_get_event_constraints,
1213 .put_event_constraints = intel_put_event_constraints,
1214 .event_constraints = intel_core_event_constraints,
1217 static int intel_pmu_cpu_prepare(int cpu)
1219 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1221 if (!cpu_has_ht_siblings())
1224 cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
1225 GFP_KERNEL, cpu_to_node(cpu));
1226 if (!cpuc->per_core)
1229 raw_spin_lock_init(&cpuc->per_core->lock);
1230 cpuc->per_core->core_id = -1;
1234 static void intel_pmu_cpu_starting(int cpu)
1236 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1237 int core_id = topology_core_id(cpu);
1240 init_debug_store_on_cpu(cpu);
1242 * Deal with CPUs that don't clear their LBRs on power-up.
1244 intel_pmu_lbr_reset();
1246 if (!cpu_has_ht_siblings())
1249 for_each_cpu(i, topology_thread_cpumask(cpu)) {
1250 struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
1252 if (pc && pc->core_id == core_id) {
1253 kfree(cpuc->per_core);
1254 cpuc->per_core = pc;
1259 cpuc->per_core->core_id = core_id;
1260 cpuc->per_core->refcnt++;
1263 static void intel_pmu_cpu_dying(int cpu)
1265 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1266 struct intel_percore *pc = cpuc->per_core;
1269 if (pc->core_id == -1 || --pc->refcnt == 0)
1271 cpuc->per_core = NULL;
1274 fini_debug_store_on_cpu(cpu);
1277 static __initconst const struct x86_pmu intel_pmu = {
1279 .handle_irq = intel_pmu_handle_irq,
1280 .disable_all = intel_pmu_disable_all,
1281 .enable_all = intel_pmu_enable_all,
1282 .enable = intel_pmu_enable_event,
1283 .disable = intel_pmu_disable_event,
1284 .hw_config = intel_pmu_hw_config,
1285 .schedule_events = x86_schedule_events,
1286 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1287 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1288 .event_map = intel_pmu_event_map,
1289 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1292 * Intel PMCs cannot be accessed sanely above 32 bit width,
1293 * so we install an artificial 1<<31 period regardless of
1294 * the generic event period:
1296 .max_period = (1ULL << 31) - 1,
1297 .get_event_constraints = intel_get_event_constraints,
1298 .put_event_constraints = intel_put_event_constraints,
1300 .cpu_prepare = intel_pmu_cpu_prepare,
1301 .cpu_starting = intel_pmu_cpu_starting,
1302 .cpu_dying = intel_pmu_cpu_dying,
1305 static void intel_clovertown_quirks(void)
1308 * PEBS is unreliable due to:
1310 * AJ67 - PEBS may experience CPL leaks
1311 * AJ68 - PEBS PMI may be delayed by one event
1312 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
1313 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
1315 * AJ67 could be worked around by restricting the OS/USR flags.
1316 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
1318 * AJ106 could possibly be worked around by not allowing LBR
1319 * usage from PEBS, including the fixup.
1320 * AJ68 could possibly be worked around by always programming
1321 * a pebs_event_reset[0] value and coping with the lost events.
1323 * But taken together it might just make sense to not enable PEBS on
1326 printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
1328 x86_pmu.pebs_constraints = NULL;
1331 static __init int intel_pmu_init(void)
1333 union cpuid10_edx edx;
1334 union cpuid10_eax eax;
1335 unsigned int unused;
1339 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
1340 switch (boot_cpu_data.x86) {
1342 return p6_pmu_init();
1344 return p4_pmu_init();
1350 * Check whether the Architectural PerfMon supports
1351 * Branch Misses Retired hw_event or not.
1353 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
1354 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
1357 version = eax.split.version_id;
1361 x86_pmu = intel_pmu;
1363 x86_pmu.version = version;
1364 x86_pmu.num_counters = eax.split.num_counters;
1365 x86_pmu.cntval_bits = eax.split.bit_width;
1366 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
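/*
 * Example (hypothetical leaf value, for illustration only): a CPUID.0xA
 * EAX of 0x07300403 would decode to version_id = 3, num_counters = 4,
 * bit_width = 48 and mask_length = 7, giving
 * cntval_mask = (1ULL << 48) - 1 = 0x0000ffffffffffff.
 */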
1369 * Quirk: v2 perfmon does not report fixed-purpose events, so
1370 * assume at least 3 events:
1373 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
1376 * v2 and above have a perf capabilities MSR
1381 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
1382 x86_pmu.intel_cap.capabilities = capabilities;
1388 * Install the hw-cache-events table:
1390 switch (boot_cpu_data.x86_model) {
1391 case 14: /* 65 nm core solo/duo, "Yonah" */
1392 pr_cont("Core events, ");
1395 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
1396 x86_pmu.quirks = intel_clovertown_quirks;
1397 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
1398 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
1399 case 29: /* six-core 45 nm xeon "Dunnington" */
1400 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
1401 sizeof(hw_cache_event_ids));
1403 intel_pmu_lbr_init_core();
1405 x86_pmu.event_constraints = intel_core2_event_constraints;
1406 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
1407 pr_cont("Core2 events, ");
1410 case 26: /* 45 nm nehalem, "Bloomfield" */
1411 case 30: /* 45 nm nehalem, "Lynnfield" */
1412 case 46: /* 45 nm nehalem-ex, "Beckton" */
1413 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
1414 sizeof(hw_cache_event_ids));
1415 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
1416 sizeof(hw_cache_extra_regs));
1418 intel_pmu_lbr_init_nhm();
1420 x86_pmu.event_constraints = intel_nehalem_event_constraints;
1421 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
1422 x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
1423 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1424 x86_pmu.extra_regs = intel_nehalem_extra_regs;
1428 * Erratum AAJ80 detected; we work around it by using
1429 * the BR_MISP_EXEC.ANY event. This will over-count
1430 * branch-misses, but it's still much better than the
1431 * architectural event which is often completely bogus:
1433 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
1435 pr_cont("erratum AAJ80 worked around, ");
1437 pr_cont("Nehalem events, ");
1441 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
1442 sizeof(hw_cache_event_ids));
1444 intel_pmu_lbr_init_atom();
1446 x86_pmu.event_constraints = intel_gen_event_constraints;
1447 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
1448 pr_cont("Atom events, ");
1451 case 37: /* 32 nm nehalem, "Clarkdale" */
1452 case 44: /* 32 nm nehalem, "Gulftown" */
1453 case 47: /* 32 nm Xeon E7 */
1454 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
1455 sizeof(hw_cache_event_ids));
1456 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
1457 sizeof(hw_cache_extra_regs));
1459 intel_pmu_lbr_init_nhm();
1461 x86_pmu.event_constraints = intel_westmere_event_constraints;
1462 x86_pmu.percore_constraints = intel_westmere_percore_constraints;
1463 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
1464 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
1465 x86_pmu.extra_regs = intel_westmere_extra_regs;
1466 pr_cont("Westmere events, ");
1469 case 42: /* SandyBridge */
1470 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
1471 sizeof(hw_cache_event_ids));
1473 intel_pmu_lbr_init_nhm();
1475 x86_pmu.event_constraints = intel_snb_event_constraints;
1476 x86_pmu.pebs_constraints = intel_snb_pebs_events;
1477 pr_cont("SandyBridge events, ");
1482 * default constraints for v2 and up
1484 x86_pmu.event_constraints = intel_gen_event_constraints;
1485 pr_cont("generic architected perfmon, ");
1490 #else /* CONFIG_CPU_SUP_INTEL */
1492 static int intel_pmu_init(void)
1497 #endif /* CONFIG_CPU_SUP_INTEL */