/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
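/*
 * Each entry above packs the architectural umask into bits 15:8 and the
 * event select into bits 7:0 of the raw config. For illustration (these
 * helper names are not used by this file), 0x412e decomposes as:
 *
 *	#define EV_SEL(x)	((x) & 0xff)		/- 0x2e: LONGEST_LAT_CACHE -/
 *	#define EV_UMASK(x)	(((x) >> 8) & 0xff)	/- 0x41: the MISS umask -/
 */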
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
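/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is a counter bitmask:
 * for illustration, 0x2 restricts FP_ASSIST above to generic counter 1
 * and 0x1 pins CYCLES_DIV_BUSY to counter 0. FIXED_EVENT_CONSTRAINT(),
 * used in the tables below, takes the fixed-counter index instead of a
 * counter mask.
 */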
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
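/*
 * Offcore-response events reach userspace as event 0xb7/0xbb plus a
 * filter value that lives in a separate MSR. For illustration, a raw
 * event such as cpu/event=0xb7,umask=0x1,offcore_rsp=0x4003/ (the 0x4003
 * value is an arbitrary example) has its filter written to
 * MSR_OFFCORE_RSP_0; when that MSR is already claimed with a different
 * filter, intel_alt_er() below retries with the RSP_1 sibling.
 */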
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note: the low 8 bits of the eventsel code do not form a
	 * continuous field; some of the bits #GP when set, so they are
	 * masked out of the valid mask.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};
EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");
struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};
struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};
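/*
 * These attributes are exported under
 * /sys/bus/event_source/devices/cpu/events/, so (for illustration)
 * "perf stat -e cpu/mem-loads/" on SNB resolves to
 * event=0xcd,umask=0x1,ldlat=3 without the user spelling out the raw
 * encoding.
 */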
static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
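/*
 * For illustration: a generic PERF_COUNT_HW_CACHE_MISSES event simply
 * indexes the table above and yields the raw encoding 0x412e
 * (LONGEST_LAT_CACHE.MISS), which is then programmed like any other raw
 * event.
 */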
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */
#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
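/*
 * Worked example of how these compose: the LL read-miss filter used in
 * skl_hw_cache_extra_regs below is SKL_DEMAND_READ|SKL_L3_MISS|
 * SKL_ANY_SNOOP|SKL_SUPPLIER_NONE, i.e. demand data reads that missed
 * L3 (local or any remote hop of DRAM), with any snoop outcome and no
 * supplier.
 */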
static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x283,	/* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x608,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x649,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0xe85,	/* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
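/*
 * Example composition: the LL demand-read miss filter used below is
 * SNB_DMND_READ|SNB_L3_MISS, i.e. demand + LLC data reads that were
 * satisfied by DRAM (local or remote, with any snoop outcome) or by a
 * non-DRAM source.
 */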
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0,	/* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0,	/* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851,	/* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e,	/* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108,	/* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185,	/* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,	/* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151,	/* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,	/* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251,	/* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e,	/* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e,	/* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,	/* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280,	/* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,	/* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108,	/* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,	/* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c,	/* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185,	/* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8,	/* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
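/*
 * For illustration: NHM_L3_ACCESS is simply every possible response
 * source, NHM_L3_HIT|NHM_L3_MISS, so an LL read access in the table
 * below becomes NHM_DMND_READ|NHM_L3_ACCESS while the corresponding
 * miss keeps only the NHM_L3_MISS sources.
 */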
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,	/* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151,	/* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,	/* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251,	/* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e,	/* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e,	/* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,	/* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280,	/* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,	/* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108,	/* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,	/* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c,	/* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8,	/* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8,	/* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,	/* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140,	/* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,	/* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141,	/* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e,	/* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080,	/* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081,	/* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29,	/* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129,	/* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A,	/* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A,	/* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,	/* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208,	/* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,	/* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808,	/* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282,	/* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5,	/* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140,	/* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240,	/* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,	/* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280,	/* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29,	/* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129,	/* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A,	/* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A,	/* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140,	/* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508,	/* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240,	/* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608,	/* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282,	/* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5,	/* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
	EVENT_EXTRA_END
};
#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};
static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0104,	/* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,	/* ICACHE.ACCESSES */
		[ C(RESULT_MISS)   ] = 0x0280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0804,	/* LD_DTLB_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,	/* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x40205,	/* PAGE_WALKS.I_SIDE_WALKS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,	/* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5,	/* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
/*
 * Use from PMIs where the LBRs are already disabled.
 */
static void __intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
	else
		intel_bts_disable_local();

	intel_pmu_pebs_disable_all();
}
static void intel_pmu_disable_all(void)
{
	__intel_pmu_disable_all();
	intel_pmu_lbr_disable_all();
}
static void __intel_pmu_enable_all(int added, bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all(pmi);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	} else
		intel_bts_enable_local();
}
static void intel_pmu_enable_all(int added)
{
	__intel_pmu_enable_all(added, false);
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
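/*
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control nibble per
 * fixed counter (enable-PMI 0x8, any-thread 0x4, ring-3 0x2, ring-0
 * 0x1). For illustration, disabling fixed counter 1 clears bits 7:4
 * via mask = 0xf << 4 while leaving the neighbouring nibbles intact.
 */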
static inline bool event_is_checkpointed(struct perf_event *event)
{
	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}
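/*
 * HSW_IN_TX_CHECKPOINTED corresponds to the "in_tx_cp" event modifier
 * on Haswell TSX: such counters roll back to their checkpointed value
 * when a transaction aborts, which is why they get special treatment
 * in the enable/disable and PMI paths below.
 */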
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
	cpuc->intel_cp_status &= ~(1ull << hwc->idx);

	/*
	 * LBR must be disabled before the actual event,
	 * because any event may be combined with LBR
	 */
	if (needs_branch_stack(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}
	/*
	 * LBR must be enabled before the actual event,
	 * because any event may be combined with LBR
	 */
	if (needs_branch_stack(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(event_is_checkpointed(event)))
		cpuc->intel_cp_status |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	/*
	 * For a checkpointed counter always reset back to 0. This
	 * avoids a situation where the counter overflows, aborts the
	 * transaction and is then set back to shortly before the
	 * overflow, and overflows and aborts again.
	 */
	if (unlikely(event_is_checkpointed(event))) {
		/* No race with NMIs because the counter should not be armed */
		wrmsrl(event->hw.event_base, 0);
		local64_set(&event->hw.prev_count, 0);
	}
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	/* Ack all overflows and disable fixed counters */
	if (x86_pmu.version >= 2) {
		intel_pmu_ack_status(intel_pmu_get_status());
		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	}

	/* Reset LBRs and LBR freezing */
	if (x86_pmu.lbr_nr) {
		update_debugctlmsr(get_debugctlmsr() &
			~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
	}

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * No known reason to not always do late ACK,
	 * but just in case do it opt-in.
	 */
	if (!x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	__intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	handled += intel_bts_interrupt();
	status = intel_pmu_get_status();
	if (!status)
		goto done;

	loops = 0;
again:
	intel_pmu_lbr_read();
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		static bool warned = false;
		if (!warned) {
			WARN(1, "perfevents: irq loop stuck!\n");
			perf_event_print_debug();
			warned = true;
		}
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	/*
	 * Ignore a range of extra bits in status that do not indicate
	 * overflow by themselves.
	 */
	status &= ~(GLOBAL_STATUS_COND_CHG |
		    GLOBAL_STATUS_ASIF |
		    GLOBAL_STATUS_LBRS_FROZEN);
	if (!status)
		goto done;

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	/*
	 * Intel PT
	 */
	if (__test_and_clear_bit(55, (unsigned long *)&status)) {
		handled++;
		intel_pt_interrupt();
	}

	/*
	 * Checkpointed counters can lead to 'spurious' PMIs because the
	 * rollback caused by the PMI will have cleared the overflow status
	 * bit. Therefore always force probe these counters.
	 */
	status |= cpuc->intel_cp_status;

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	__intel_pmu_enable_all(0, true);
	/*
	 * Only unmask the NMI after the overflow counters
	 * have been reset. This avoids spurious NMIs on
	 * Haswell CPUs.
	 */
	if (x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);

	return handled;
}
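/*
 * For reference, the GLOBAL_STATUS bits consumed by the handler above:
 * the low bits flag overflow of the generic counters, bits 32+ the
 * fixed counters, bit 55 Intel PT, and bit 62 the PEBS/DS buffer; the
 * COND_CHG/ASIF/LBRS_FROZEN bits are masked off because they do not
 * indicate counter overflow by themselves.
 */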
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
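/*
 * In practice this routes a branch-instructions event with a fixed
 * sample period of exactly 1 (e.g. "perf record -e branches -c 1") to
 * the BTS facility instead of occupying a generic counter.
 */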
static int intel_alt_er(int idx, u64 config)
{
	int alt_idx = idx;

	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		alt_idx = EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		alt_idx = EXTRA_REG_RSP_0;

	if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
		return idx;

	return alt_idx;
}
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
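/*
 * Example: an offcore event created with the RSP_0 encoding that cannot
 * get MSR_OFFCORE_RSP_0 is rewritten here to the RSP_1 encoding so that
 * it programs MSR_OFFCORE_RSP_1 instead; this is why the extra_regs
 * tables above must list OFFCORE_RSP_0 first.
 */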
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If its a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling. reg->alloc ensures we only do the ER
			 * fixup once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx, reg->config);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &unconstrained;
}
static struct event_constraint *
__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			      struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, idx, event);
}
static void
intel_start_scheduling(struct cpu_hw_events *cpuc)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	/*
	 * nothing needed if in group validation mode
	 */
	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;

	/*
	 * no exclusion needed
	 */
	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	xl = &excl_cntrs->states[tid];

	xl->sched_started = true;
	/*
	 * lock shared state until we are done scheduling
	 * in stop_event_scheduling()
	 * makes scheduling appear as a transaction
	 */
	raw_spin_lock(&excl_cntrs->lock);
}
static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct event_constraint *c = cpuc->event_constraint[idx];
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;

	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
		return;

	xl = &excl_cntrs->states[tid];

	lockdep_assert_held(&excl_cntrs->lock);

	if (c->flags & PERF_X86_EVENT_EXCL)
		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
	else
		xl->state[cntr] = INTEL_EXCL_SHARED;
}
static void
intel_stop_scheduling(struct cpu_hw_events *cpuc)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	/*
	 * nothing needed if in group validation mode
	 */
	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;
	/*
	 * no exclusion needed
	 */
	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	xl = &excl_cntrs->states[tid];

	xl->sched_started = false;
	/*
	 * release shared state lock (acquired in intel_start_scheduling())
	 */
	raw_spin_unlock(&excl_cntrs->lock);
}
static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			   int idx, struct event_constraint *c)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct intel_excl_states *xlo;
	int tid = cpuc->excl_thread_id;
	int is_excl, i;

	/*
	 * validating a group does not require
	 * enforcing cross-thread exclusion
	 */
	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return c;

	/*
	 * no exclusion needed
	 */
	if (WARN_ON_ONCE(!excl_cntrs))
		return c;

	/*
	 * because we modify the constraint, we need
	 * to make a copy. Static constraints come
	 * from static const tables.
	 *
	 * only needed when constraint has not yet
	 * been cloned (marked dynamic)
	 */
	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
		struct event_constraint *cx;

		/*
		 * grab pre-allocated constraint entry
		 */
		cx = &cpuc->constraint_list[idx];

		/*
		 * initialize dynamic constraint
		 * with static constraint
		 */
		*cx = *c;

		/*
		 * mark constraint as dynamic, so we
		 * can free it later on
		 */
		cx->flags |= PERF_X86_EVENT_DYNAMIC;
		c = cx;
	}

	/*
	 * From here on, the constraint is dynamic.
	 * Either it was just allocated above, or it
	 * was allocated during an earlier invocation
	 * of this function.
	 */

	/*
	 * state of the sibling HT thread
	 */
	xlo = &excl_cntrs->states[tid ^ 1];

	/*
	 * event requires exclusive counter access
	 * across HT threads
	 */
	is_excl = c->flags & PERF_X86_EVENT_EXCL;
	if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
		event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
		if (!cpuc->n_excl++)
			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
	}

	/*
	 * Modify static constraint with current dynamic
	 * state of the sibling thread:
	 *
	 * EXCLUSIVE: sibling counter measuring exclusive event
	 * SHARED   : sibling counter measuring non-exclusive event
	 * UNUSED   : sibling counter unused
	 */
	for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
		/*
		 * exclusive event in sibling counter:
		 * our corresponding counter cannot be used
		 * regardless of our event
		 */
		if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
			__clear_bit(i, c->idxmsk);
		/*
		 * if measuring an exclusive event while the sibling
		 * measures a non-exclusive one, then the counter cannot
		 * be used either
		 */
		if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
			__clear_bit(i, c->idxmsk);
	}

	/*
	 * recompute actual bit weight for scheduling algorithm
	 */
	c->weight = hweight64(c->idxmsk64);

	/*
	 * if we return an empty mask, then switch
	 * back to static empty constraint to avoid
	 * the cost of freeing later on
	 */
	if (c->weight == 0)
		c = &emptyconstraint;

	return c;
}
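/*
 * Worked example for the masking loop above (illustration only): on a
 * 4-counter PMU, assume the sibling thread's states are
 *
 *	xlo->state[] = { EXCLUSIVE, SHARED, UNUSED, UNUSED }
 *
 * A non-exclusive event only loses counter 0 (idxmsk 0b1111 -> 0b1110); an
 * exclusive event also loses counter 1, where the sibling measures a shared
 * event (idxmsk -> 0b1100). If no bits remain, &emptyconstraint is returned
 * and the event cannot be scheduled this time around.
 */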
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			    struct perf_event *event)
{
	struct event_constraint *c1 = NULL;
	struct event_constraint *c2;

	if (idx >= 0) /* fake does < 0 */
		c1 = cpuc->event_constraint[idx];

	/*
	 * first time only:
	 * - static constraint: no change across incremental scheduling calls
	 * - dynamic constraint: handled by intel_get_excl_constraints()
	 */
	c2 = __intel_get_event_constraints(cpuc, idx, event);
	if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
		bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
		c1->weight = c2->weight;
		c2 = c1;
	}

	if (cpuc->excl_cntrs)
		return intel_get_excl_constraints(cpuc, event, idx, c2);

	return c2;
}
static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
				       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	int tid = cpuc->excl_thread_id;
	struct intel_excl_states *xl;

	/*
	 * nothing needed if in group validation mode
	 */
	if (cpuc->is_fake)
		return;

	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
		if (!--cpuc->n_excl)
			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
	}

	/*
	 * If event was actually assigned, then mark the counter state as
	 * unused now.
	 */
	if (hwc->idx >= 0) {
		xl = &excl_cntrs->states[tid];

		/*
		 * put_constraint may be called from x86_schedule_events()
		 * which already has the lock held so here make locking
		 * conditional.
		 */
		if (!xl->sched_started)
			raw_spin_lock(&excl_cntrs->lock);

		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;

		if (!xl->sched_started)
			raw_spin_unlock(&excl_cntrs->lock);
	}
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);

	/*
	 * if the PMU has exclusive counter restrictions, then
	 * all events are subject to and must call the
	 * put_excl_constraints() routine
	 */
	if (cpuc->excl_cntrs)
		intel_put_excl_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer instructions,
		 * which is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
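/*
 * For illustration, using the format attributes defined later in this file
 * (inv = config:23, cmask = config:24-31): the alternative encoding
 * X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) expands to the raw config
 * 0x108000c0, equivalent to perf's raw event syntax
 *
 *	perf stat -e cpu/event=0xc0,inv=1,cmask=16/ ...
 */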
static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retire
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer micro-ops,
		 * which is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
{
	unsigned long flags = x86_pmu.free_running_flags;

	if (event->attr.use_clockid)
		flags &= ~PERF_SAMPLE_TIME;
	return flags;
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip) {
		if (!event->attr.freq) {
			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
			if (!(event->attr.sample_type &
			      ~intel_pmu_free_running_flags(event)))
				event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
		}
		if (x86_pmu.pebs_aliases)
			x86_pmu.pebs_aliases(event);
	}

	if (needs_branch_stack(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;

		/*
		 * BTS is set up earlier in this path, so don't account twice
		 */
		if (!intel_pmu_has_bts(event)) {
			/* disallow lbr if conflicting events are present */
			if (x86_add_exclusive(x86_lbr_exclusive_lbr))
				return -EBUSY;

			event->destroy = hw_perf_lbr_event_destroy;
		}
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;
	*nr = 2;

	return arr;
}
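/*
 * The {msr, host, guest} triples built here are consumed on the VM-entry/exit
 * path; e.g. KVM's atomic_switch_perf_msrs() calls perf_guest_get_msrs() and
 * feeds the result into the VMX MSR autoload lists, so a guest never runs
 * with host-only counters (or host PEBS) enabled.
 */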
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}
static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}
static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
		    cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}
static int hsw_hw_config(struct perf_event *event)
{
	int ret = intel_pmu_hw_config(event);

	if (ret)
		return ret;
	if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
		return 0;
	event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);

	/*
	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
	 * PEBS or in ANY thread mode. Since the results are nonsensical,
	 * forbid this combination.
	 */
	if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
	    ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
	     event->attr.precise_ip > 0))
		return -EOPNOTSUPP;

	if (event_is_checkpointed(event)) {
		/*
		 * Sampling of checkpointed events can cause situations where
		 * the CPU constantly aborts because of an overflow, which is
		 * then checkpointed back and ignored. Forbid checkpointing
		 * for sampling.
		 *
		 * But still allow a long sampling period, so that perf stat
		 * from KVM works.
		 */
		if (event->attr.sample_period > 0 &&
		    event->attr.sample_period < 0x7fffffff)
			return -EOPNOTSUPP;
	}
	return 0;
}
static struct event_constraint counter2_constraint =
			EVENT_CONSTRAINT(0, 0x4, 0);

static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_get_event_constraints(cpuc, idx, event);

	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
	if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
		if (c->idxmsk64 & (1U << 2))
			return &counter2_constraint;
		return &emptyconstraint;
	}

	return c;
}
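/*
 * Note on counter2_constraint above: EVENT_CONSTRAINT(0, 0x4, 0) carries the
 * counter mask 0x4 = 0b100, i.e. generic counter 2 only, which is the one
 * counter the checkpointed-transaction quirk permits.
 */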
/*
 * Broadwell:
 *
 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
 * the two to enforce a minimum period of 128 (the smallest value that has bits
 * 0-5 cleared and >= 100).
 *
 * Because of how the code in x86_perf_event_set_period() works, the truncation
 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
 * to make up for the 'lost' events due to carrying the 'error' in period_left.
 *
 * Therefore the effective (average) period matches the requested period,
 * despite coarser hardware granularity.
 */
static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
{
	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
			X86_CONFIG(.event=0xc0, .umask=0x01)) {
		if (left < 128)
			left = 128;
		left &= ~0x3fu;
	}
	return left;
}
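/*
 * Worked example (illustration): for INST_RETIRED.ALL a requested period of
 * 100 is raised to the 128 minimum; a requested period of 200 has its low six
 * bits cleared, giving 192 (200 & ~0x3f); a period that is already >= 128 and
 * a multiple of 64, e.g. 8192, passes through unchanged.
 */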
PMU_FORMAT_ATTR(event,		"config:0-7");
PMU_FORMAT_ATTR(umask,		"config:8-15");
PMU_FORMAT_ATTR(edge,		"config:18");
PMU_FORMAT_ATTR(pc,		"config:19");
PMU_FORMAT_ATTR(any,		"config:21"); /* v3 + */
PMU_FORMAT_ATTR(inv,		"config:23");
PMU_FORMAT_ATTR(cmask,		"config:24-31");
PMU_FORMAT_ATTR(in_tx,		"config:32");
PMU_FORMAT_ATTR(in_tx_cp,	"config:33");

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}
static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
{
	struct intel_excl_cntrs *c;

	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
			 GFP_KERNEL, cpu_to_node(cpu));
	if (c) {
		raw_spin_lock_init(&c->lock);
		c->core_id = -1;
	}
	return c;
}
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
		cpuc->shared_regs = allocate_shared_regs(cpu);
		if (!cpuc->shared_regs)
			goto err;
	}

	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);

		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
		if (!cpuc->constraint_list)
			goto err_shared_regs;

		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
		if (!cpuc->excl_cntrs)
			goto err_constraint_list;

		cpuc->excl_thread_id = 0;
	}

	return NOTIFY_OK;

err_constraint_list:
	kfree(cpuc->constraint_list);
	cpuc->constraint_list = NULL;

err_shared_regs:
	kfree(cpuc->shared_regs);
	cpuc->shared_regs = NULL;

err:
	return NOTIFY_BAD;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
		void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];

		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				*onln = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;
	}

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];

	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
			struct intel_excl_cntrs *c;

			c = per_cpu(cpu_hw_events, i).excl_cntrs;
			if (c && c->core_id == core_id) {
				cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
				cpuc->excl_cntrs = c;
				cpuc->excl_thread_id = 1;
				break;
			}
		}
		cpuc->excl_cntrs->core_id = core_id;
		cpuc->excl_cntrs->refcnt++;
	}
}
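/*
 * Example of the adoption scheme above (CPU numbering is illustrative): if
 * CPU 0 and CPU 4 are HT siblings of one core, the first to come online keeps
 * the shared_regs/excl_cntrs it allocated in intel_pmu_cpu_prepare(); the
 * second finds a matching core_id, queues its own copies on kfree_on_online[]
 * and adopts the sibling's structures, becoming excl_thread_id = 1.
 */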
static void free_excl_cntrs(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_excl_cntrs *c;

	c = cpuc->excl_cntrs;
	if (c) {
		if (c->core_id == -1 || --c->refcnt == 0)
			kfree(c);
		cpuc->excl_cntrs = NULL;
		kfree(cpuc->constraint_list);
		cpuc->constraint_list = NULL;
	}
}
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	free_excl_cntrs(cpu);

	fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
				 bool sched_in)
{
	if (x86_pmu.pebs_active)
		intel_pmu_pebs_sched_task(ctx, sched_in);
	if (x86_pmu.lbr_nr)
		intel_pmu_lbr_sched_task(ctx, sched_in);
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");

PMU_FORMAT_ATTR(frontend, "config1:0-23");

static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	&format_attr_in_tx.attr,
	&format_attr_in_tx_cp.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	&format_attr_ldlat.attr, /* PEBS load latency */
	NULL,
};

static struct attribute *skl_format_attr[] = {
	&format_attr_frontend.attr,
	NULL,
};
static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	.free_running_flags	= PEBS_FREERUNNING_FLAGS,

	/*
	 * Intel PMCs cannot be accessed sanely above 32-bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL<<31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	/*
	 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
	 * together with PMU version 1 and thus be using core_pmu with
	 * shared_regs. We need following callbacks here to allocate
	 * it properly.
	 */
	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};
static __initconst const struct x86_pmu intel_pmu = {
	.name			= "intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	.free_running_flags	= PEBS_FREERUNNING_FLAGS,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.sched_task		= intel_pmu_sched_task,
};
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 42: /* SNB */
		rev = 0x28;
		break;

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
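/*
 * For example (microcode revisions are hypothetical): a model 42 SNB part
 * running microcode 0x25 reports broken (0x25 < 0x28), while one updated to
 * 0x28 or later does not. Unknown models keep rev = UINT_MAX and are always
 * treated as broken.
 */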
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock..
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}
/*
 * Under certain circumstances, accessing certain MSRs may cause #GP.
 * This function tests whether the input MSR can be safely accessed.
 */
static bool check_msr(unsigned long msr, u64 mask)
{
	u64 val_old, val_new, val_tmp;

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	if (rdmsrl_safe(msr, &val_old))
		return false;

	/*
	 * Only change the bits which can be updated by wrmsrl.
	 */
	val_tmp = val_old ^ mask;
	if (wrmsrl_safe(msr, val_tmp) ||
	    rdmsrl_safe(msr, &val_new))
		return false;

	if (val_new != val_tmp)
		return false;

	/*
	 * Here it's sure that the MSR can be safely accessed.
	 * Restore the old value and return.
	 */
	wrmsrl(msr, val_old);

	return true;
}
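/*
 * Typical usage, as in intel_pmu_init() below - the mask picks a couple of
 * writable bits for the probe to flip:
 *
 *	check_msr(x86_pmu.lbr_tos, 0x3UL);	// is the LBR TOS usable?
 *	check_msr(er->msr, 0x11UL);		// is this extra (offcore) reg usable?
 */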
static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that are reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
/*
 * enable software workaround for errata:
 * SNB: BJ122
 * IVB: BV98
 * HSW: HSD29
 *
 * Only needed when HT is enabled. However detecting
 * if HT is enabled is difficult (model specific). So instead,
 * we enable the workaround in the early boot, and verify if
 * it is needed in a later initcall phase once we have valid
 * topology information to check if HT is actually enabled
 */
static __init void intel_ht_bug(void)
{
	x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;

	x86_pmu.start_scheduling = intel_start_scheduling;
	x86_pmu.commit_scheduling = intel_commit_scheduling;
	x86_pmu.stop_scheduling = intel_stop_scheduling;
}
EVENT_ATTR_STR(mem-loads,	mem_ld_hsw,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_hsw,	"event=0xd0,umask=0x82");

/* Haswell special events */
EVENT_ATTR_STR(tx-start,	tx_start,	"event=0xc9,umask=0x1");
EVENT_ATTR_STR(tx-commit,	tx_commit,	"event=0xc9,umask=0x2");
EVENT_ATTR_STR(tx-abort,	tx_abort,	"event=0xc9,umask=0x4");
EVENT_ATTR_STR(tx-capacity,	tx_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(tx-conflict,	tx_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(el-start,	el_start,	"event=0xc8,umask=0x1");
EVENT_ATTR_STR(el-commit,	el_commit,	"event=0xc8,umask=0x2");
EVENT_ATTR_STR(el-abort,	el_abort,	"event=0xc8,umask=0x4");
EVENT_ATTR_STR(el-capacity,	el_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(el-conflict,	el_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(cycles-t,	cycles_t,	"event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct,	cycles_ct,	"event=0x3c,in_tx=1,in_tx_cp=1");
static struct attribute *hsw_events_attrs[] = {
	EVENT_PTR(tx_start),
	EVENT_PTR(tx_commit),
	EVENT_PTR(tx_abort),
	EVENT_PTR(tx_capacity),
	EVENT_PTR(tx_conflict),
	EVENT_PTR(el_start),
	EVENT_PTR(el_commit),
	EVENT_PTR(el_abort),
	EVENT_PTR(el_capacity),
	EVENT_PTR(el_conflict),
	EVENT_PTR(cycles_t),
	EVENT_PTR(cycles_ct),
	EVENT_PTR(mem_ld_hsw),
	EVENT_PTR(mem_st_hsw),
	NULL
};
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	struct extra_reg *er;
	int version, i;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65nm Core "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* 65nm Core2 "Merom" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* 65nm Core2 "Merom-L" */
	case 23: /* 45nm Core2 "Penryn" */
	case 29: /* 45nm Core2 "Dunnington (MP)" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;
	case 28: /* 45nm Atom "Pineview"   */
	case 38: /* 45nm Atom "Lincroft"   */
	case 39: /* 32nm Atom "Penwell"    */
	case 53: /* 32nm Atom "Cloverview" */
	case 54: /* 32nm Atom "Cedarview"  */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;
	case 55: /* 22nm Atom "Silvermont"                */
	case 76: /* 14nm Atom "Airmont"                   */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		pr_cont("Silvermont events, ");
		break;
	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;
	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 45)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;
	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different than SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 62)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;
	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
		x86_add_quirk(intel_ht_bug);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.lbr_double_abort = true;
		pr_cont("Haswell events, ");
		break;
	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] =
			HSW_DEMAND_READ|BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] =
			HSW_DEMAND_WRITE|BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] =
			HSW_DEMAND_READ|BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] =
			HSW_DEMAND_WRITE|BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.limit_period = bdw_limit_period;
		pr_cont("Broadwell events, ");
		break;
	case 78: /* 14nm Skylake Mobile */
	case 94: /* 14nm Skylake Desktop */
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_skl_event_constraints;
		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_skl_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
						  skl_format_attr);
		WARN_ON(!x86_pmu.format_attrs);
		x86_pmu.cpu_events = hsw_events_attrs;
		pr_cont("Skylake events, ");
		break;
	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
		break;
	}

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask == FIXED_EVENT_FLAGS
			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			}
			c->idxmsk64 &=
				~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
			c->weight = hweight64(c->idxmsk64);
		}
	}
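	/*
	 * Worked example (illustration): with 4 generic and 3 fixed counters,
	 * the fixed constraint for INST_RETIRED.ANY starts at bit 32
	 * (INTEL_PMC_IDX_FIXED + 0); the loop above widens it with the
	 * generic counter bits to 0x10000000f, masks off everything above
	 * bit 34, and recomputes the weight (here 5).
	 */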
	/*
	 * Accessing LBR MSRs may cause #GP under certain circumstances,
	 * e.g. KVM doesn't support LBR MSRs.
	 * Check all LBR MSRs here.
	 * Disable LBR access if any LBR MSR can not be accessed.
	 */
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}
	/*
	 * Accessing extra MSRs may cause #GP under certain circumstances,
	 * e.g. KVM doesn't support offcore events.
	 * Check all extra_regs here.
	 */
	if (x86_pmu.extra_regs) {
		for (er = x86_pmu.extra_regs; er->msr; er++) {
			er->extra_msr_access = check_msr(er->msr, 0x11UL);
			/* Disable LBR select mapping */
			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
				x86_pmu.lbr_sel_map = NULL;
		}
	}
	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}
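	/*
	 * With full-width writes available (e.g. 48-bit counters), max_period
	 * grows from the artificial 2^31 - 1 to cntval_mask: the alternative
	 * MSR_IA32_PMC0 range accepts writes to all counter bits, whereas the
	 * legacy range sign-extends from bit 31.
	 */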
	return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled
 * If HT is off, then we disable the workaround
 */
static __init int fixup_ht_bug(void)
{
	int cpu = smp_processor_id();
	int w, c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	w = cpumask_weight(topology_sibling_cpumask(cpu));
	if (w > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	if (lockup_detector_suspend() != 0) {
		pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n");
		return 0;
	}

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	lockup_detector_resume();

	get_online_cpus();

	for_each_online_cpu(c) {
		free_excl_cntrs(c);
	}

	put_online_cpus();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug)