2 * Performance events x86 architecture code
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
11 * For licensing details see kernel-base/COPYING
14 #include <linux/perf_event.h>
15 #include <linux/capability.h>
16 #include <linux/notifier.h>
17 #include <linux/hardirq.h>
18 #include <linux/kprobes.h>
19 #include <linux/module.h>
20 #include <linux/kdebug.h>
21 #include <linux/sched.h>
22 #include <linux/uaccess.h>
23 #include <linux/highmem.h>
24 #include <linux/cpu.h>
27 #include <asm/stacktrace.h>
30 static u64 perf_event_mask __read_mostly;
32 /* The maximal number of PEBS events: */
33 #define MAX_PEBS_EVENTS 4
35 /* The size of a BTS record in bytes: */
36 #define BTS_RECORD_SIZE 24
38 /* The size of a per-cpu BTS buffer in bytes: */
39 #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048)
41 /* The BTS overflow threshold in bytes from the end of the buffer: */
42 #define BTS_OVFL_TH (BTS_RECORD_SIZE * 128)
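/*
 * With 24-byte records this works out to a 48kB buffer per CPU
 * (2048 records), with the overflow interrupt armed 128 records
 * (3kB) before the end of the buffer.
 */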
46 * Bits in the debugctlmsr controlling branch tracing.
48 #define X86_DEBUGCTL_TR (1 << 6)
49 #define X86_DEBUGCTL_BTS (1 << 7)
50 #define X86_DEBUGCTL_BTINT (1 << 8)
51 #define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
52 #define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
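/*
 * Rough meaning of these bits (see the SDM for the authoritative
 * definitions): TR enables branch trace messages, BTS stores them in
 * the DS buffer, BTINT raises an interrupt when the buffer threshold
 * is reached, and the two OFF bits suppress tracing in ring 0 (OS)
 * and ring 3 (USR) respectively.
 */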
55 * A debug store configuration.
57 * We only support architectures that use 64bit fields.
62 u64 bts_absolute_maximum;
63 u64 bts_interrupt_threshold;
66 u64 pebs_absolute_maximum;
67 u64 pebs_interrupt_threshold;
68 u64 pebs_event_reset[MAX_PEBS_EVENTS];
71 struct cpu_hw_events {
72 struct perf_event *events[X86_PMC_IDX_MAX];
73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
75 unsigned long interrupts;
77 struct debug_store *ds;
80 struct event_constraint {
81 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
85 #define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
86 #define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 }
88 #define for_each_event_constraint(e, c) \
89 for ((e) = (c); (e)->idxmsk[0]; (e)++)
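/*
 * Illustrative example: EVENT_CONSTRAINT(0xc1, 0x1) says that event
 * code 0xc1 may only be scheduled on the counter(s) in bitmask 0x1,
 * i.e. generic counter 0. A constraint table is terminated by
 * EVENT_CONSTRAINT_END (an all-zero entry).
 */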
93 * struct x86_pmu - generic x86 pmu
98 int (*handle_irq)(struct pt_regs *);
99 void (*disable_all)(void);
100 void (*enable_all)(void);
101 void (*enable)(struct hw_perf_event *, int);
102 void (*disable)(struct hw_perf_event *, int);
105 u64 (*event_map)(int);
106 u64 (*raw_event)(u64);
109 int num_events_fixed;
115 void (*enable_bts)(u64 config);
116 void (*disable_bts)(void);
117 int (*get_event_idx)(struct cpu_hw_events *cpuc,
118 struct hw_perf_event *hwc);
121 static struct x86_pmu x86_pmu __read_mostly;
123 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
127 static const struct event_constraint *event_constraints;
130 * Not sure about some of these
132 static const u64 p6_perfmon_event_map[] =
134 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
135 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
136 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
137 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
138 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
139 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
140 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
143 static u64 p6_pmu_event_map(int hw_event)
145 return p6_perfmon_event_map[hw_event];
149 * Event setting that is specified not to count anything.
150 * We use this to effectively disable a counter.
152 * L2_RQSTS with 0 MESI unit mask.
154 #define P6_NOP_EVENT 0x0000002EULL
156 static u64 p6_pmu_raw_event(u64 hw_event)
158 #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
159 #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
160 #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
161 #define P6_EVNTSEL_INV_MASK 0x00800000ULL
162 #define P6_EVNTSEL_REG_MASK 0xFF000000ULL
164 #define P6_EVNTSEL_MASK \
165 (P6_EVNTSEL_EVENT_MASK | \
166 P6_EVNTSEL_UNIT_MASK | \
167 P6_EVNTSEL_EDGE_MASK | \
168 P6_EVNTSEL_INV_MASK | \
171 return hw_event & P6_EVNTSEL_MASK;
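/*
 * Illustrative example: a raw config of 0x004f2e selects event 0x2e
 * (bits 0-7) with unit mask 0x4f (bits 8-15); bits not covered by the
 * mask above are stripped, so user-supplied raw values cannot flip
 * control bits such as the enable bit.
 */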
174 static const struct event_constraint intel_p6_event_constraints[] =
176 EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
177 EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
178 EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
179 EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
180 EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
181 EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
186 * Intel PerfMon v3. Used on Core2 and later.
188 static const u64 intel_perfmon_event_map[] =
190 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
191 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
192 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
193 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
194 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
195 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
196 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
199 static const struct event_constraint intel_core_event_constraints[] =
201 EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
202 EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
203 EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
204 EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
205 EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
206 EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
207 EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
208 EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
209 EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
213 static const struct event_constraint intel_nehalem_event_constraints[] =
215 EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
216 EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
217 EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
218 EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
219 EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
220 EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
221 EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
222 EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
223 EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
224 EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
228 static u64 intel_pmu_event_map(int hw_event)
230 return intel_perfmon_event_map[hw_event];
234 * Generalized hw caching related hw_event table, filled
235 * in on a per-model basis. A value of 0 means
236 * 'not supported', -1 means 'hw_event makes no sense on
237 * this CPU', any other value means the raw hw_event ID.
241 #define C(x) PERF_COUNT_HW_CACHE_##x
243 static u64 __read_mostly hw_cache_event_ids
244 [PERF_COUNT_HW_CACHE_MAX]
245 [PERF_COUNT_HW_CACHE_OP_MAX]
246 [PERF_COUNT_HW_CACHE_RESULT_MAX];
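/*
 * The attr->config of a PERF_TYPE_HW_CACHE event packs the three
 * indices as (type | (op << 8) | (result << 16)); set_ext_hw_attr()
 * below unpacks it and looks the raw event id up in this table.
 * For example, { L1D, READ, MISS } maps to 0x0140 on Nehalem.
 */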
248 static __initconst u64 nehalem_hw_cache_event_ids
249 [PERF_COUNT_HW_CACHE_MAX]
250 [PERF_COUNT_HW_CACHE_OP_MAX]
251 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
255 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
256 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
259 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
260 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
262 [ C(OP_PREFETCH) ] = {
263 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
264 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
269 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
270 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
273 [ C(RESULT_ACCESS) ] = -1,
274 [ C(RESULT_MISS) ] = -1,
276 [ C(OP_PREFETCH) ] = {
277 [ C(RESULT_ACCESS) ] = 0x0,
278 [ C(RESULT_MISS) ] = 0x0,
283 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
284 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
287 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
288 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
290 [ C(OP_PREFETCH) ] = {
291 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
292 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
297 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
298 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
301 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
302 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
304 [ C(OP_PREFETCH) ] = {
305 [ C(RESULT_ACCESS) ] = 0x0,
306 [ C(RESULT_MISS) ] = 0x0,
311 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
312 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
315 [ C(RESULT_ACCESS) ] = -1,
316 [ C(RESULT_MISS) ] = -1,
318 [ C(OP_PREFETCH) ] = {
319 [ C(RESULT_ACCESS) ] = -1,
320 [ C(RESULT_MISS) ] = -1,
325 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
326 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
329 [ C(RESULT_ACCESS) ] = -1,
330 [ C(RESULT_MISS) ] = -1,
332 [ C(OP_PREFETCH) ] = {
333 [ C(RESULT_ACCESS) ] = -1,
334 [ C(RESULT_MISS) ] = -1,
339 static __initconst u64 core2_hw_cache_event_ids
340 [PERF_COUNT_HW_CACHE_MAX]
341 [PERF_COUNT_HW_CACHE_OP_MAX]
342 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
346 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
347 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
350 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
351 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
353 [ C(OP_PREFETCH) ] = {
354 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
355 [ C(RESULT_MISS) ] = 0,
360 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
361 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
364 [ C(RESULT_ACCESS) ] = -1,
365 [ C(RESULT_MISS) ] = -1,
367 [ C(OP_PREFETCH) ] = {
368 [ C(RESULT_ACCESS) ] = 0,
369 [ C(RESULT_MISS) ] = 0,
374 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
375 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
378 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
379 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
381 [ C(OP_PREFETCH) ] = {
382 [ C(RESULT_ACCESS) ] = 0,
383 [ C(RESULT_MISS) ] = 0,
388 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
389 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
392 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
393 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
395 [ C(OP_PREFETCH) ] = {
396 [ C(RESULT_ACCESS) ] = 0,
397 [ C(RESULT_MISS) ] = 0,
402 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
403 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
406 [ C(RESULT_ACCESS) ] = -1,
407 [ C(RESULT_MISS) ] = -1,
409 [ C(OP_PREFETCH) ] = {
410 [ C(RESULT_ACCESS) ] = -1,
411 [ C(RESULT_MISS) ] = -1,
416 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
417 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
420 [ C(RESULT_ACCESS) ] = -1,
421 [ C(RESULT_MISS) ] = -1,
423 [ C(OP_PREFETCH) ] = {
424 [ C(RESULT_ACCESS) ] = -1,
425 [ C(RESULT_MISS) ] = -1,
430 static __initconst u64 atom_hw_cache_event_ids
431 [PERF_COUNT_HW_CACHE_MAX]
432 [PERF_COUNT_HW_CACHE_OP_MAX]
433 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
437 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
438 [ C(RESULT_MISS) ] = 0,
441 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
442 [ C(RESULT_MISS) ] = 0,
444 [ C(OP_PREFETCH) ] = {
445 [ C(RESULT_ACCESS) ] = 0x0,
446 [ C(RESULT_MISS) ] = 0,
451 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
452 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
455 [ C(RESULT_ACCESS) ] = -1,
456 [ C(RESULT_MISS) ] = -1,
458 [ C(OP_PREFETCH) ] = {
459 [ C(RESULT_ACCESS) ] = 0,
460 [ C(RESULT_MISS) ] = 0,
465 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
466 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
469 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
470 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
472 [ C(OP_PREFETCH) ] = {
473 [ C(RESULT_ACCESS) ] = 0,
474 [ C(RESULT_MISS) ] = 0,
479 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
480 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
483 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
484 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
486 [ C(OP_PREFETCH) ] = {
487 [ C(RESULT_ACCESS) ] = 0,
488 [ C(RESULT_MISS) ] = 0,
493 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
494 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
497 [ C(RESULT_ACCESS) ] = -1,
498 [ C(RESULT_MISS) ] = -1,
500 [ C(OP_PREFETCH) ] = {
501 [ C(RESULT_ACCESS) ] = -1,
502 [ C(RESULT_MISS) ] = -1,
507 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
508 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
511 [ C(RESULT_ACCESS) ] = -1,
512 [ C(RESULT_MISS) ] = -1,
514 [ C(OP_PREFETCH) ] = {
515 [ C(RESULT_ACCESS) ] = -1,
516 [ C(RESULT_MISS) ] = -1,
521 static u64 intel_pmu_raw_event(u64 hw_event)
523 #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
524 #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
525 #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
526 #define CORE_EVNTSEL_INV_MASK 0x00800000ULL
527 #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
529 #define CORE_EVNTSEL_MASK \
530 (CORE_EVNTSEL_EVENT_MASK | \
531 CORE_EVNTSEL_UNIT_MASK | \
532 CORE_EVNTSEL_EDGE_MASK | \
533 CORE_EVNTSEL_INV_MASK | \
534 CORE_EVNTSEL_REG_MASK)
536 return hw_event & CORE_EVNTSEL_MASK;
539 static __initconst u64 amd_hw_cache_event_ids
540 [PERF_COUNT_HW_CACHE_MAX]
541 [PERF_COUNT_HW_CACHE_OP_MAX]
542 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
546 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
547 [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
550 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
551 [ C(RESULT_MISS) ] = 0,
553 [ C(OP_PREFETCH) ] = {
554 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
555 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
560 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
561 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
564 [ C(RESULT_ACCESS) ] = -1,
565 [ C(RESULT_MISS) ] = -1,
567 [ C(OP_PREFETCH) ] = {
568 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
569 [ C(RESULT_MISS) ] = 0,
574 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
575 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
578 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
579 [ C(RESULT_MISS) ] = 0,
581 [ C(OP_PREFETCH) ] = {
582 [ C(RESULT_ACCESS) ] = 0,
583 [ C(RESULT_MISS) ] = 0,
588 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
589 [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
592 [ C(RESULT_ACCESS) ] = 0,
593 [ C(RESULT_MISS) ] = 0,
595 [ C(OP_PREFETCH) ] = {
596 [ C(RESULT_ACCESS) ] = 0,
597 [ C(RESULT_MISS) ] = 0,
602 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
603 [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
606 [ C(RESULT_ACCESS) ] = -1,
607 [ C(RESULT_MISS) ] = -1,
609 [ C(OP_PREFETCH) ] = {
610 [ C(RESULT_ACCESS) ] = -1,
611 [ C(RESULT_MISS) ] = -1,
616 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
617 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
620 [ C(RESULT_ACCESS) ] = -1,
621 [ C(RESULT_MISS) ] = -1,
623 [ C(OP_PREFETCH) ] = {
624 [ C(RESULT_ACCESS) ] = -1,
625 [ C(RESULT_MISS) ] = -1,
631 * AMD Performance Monitor K7 and later.
633 static const u64 amd_perfmon_event_map[] =
635 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
636 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
637 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
638 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
639 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
640 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
643 static u64 amd_pmu_event_map(int hw_event)
645 return amd_perfmon_event_map[hw_event];
648 static u64 amd_pmu_raw_event(u64 hw_event)
650 #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
651 #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
652 #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
653 #define K7_EVNTSEL_INV_MASK 0x000800000ULL
654 #define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
656 #define K7_EVNTSEL_MASK \
657 (K7_EVNTSEL_EVENT_MASK | \
658 K7_EVNTSEL_UNIT_MASK | \
659 K7_EVNTSEL_EDGE_MASK | \
660 K7_EVNTSEL_INV_MASK | \
663 return hw_event & K7_EVNTSEL_MASK;
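/*
 * Note: unlike the Intel masks above, the event-select mask here
 * extends above bit 31 (0x7000000FFULL) because AMD CPUs place
 * additional event-select bits in the upper half of the EVNTSEL MSR.
 */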
667 * Propagate event elapsed time into the generic event.
668 * Can only be executed on the CPU where the event is active.
669 * Returns the current (new) raw count.
672 x86_perf_event_update(struct perf_event *event,
673 struct hw_perf_event *hwc, int idx)
675 int shift = 64 - x86_pmu.event_bits;
676 u64 prev_raw_count, new_raw_count;
679 if (idx == X86_PMC_IDX_FIXED_BTS)
683 * Careful: an NMI might modify the previous event value.
685 * Our tactic to handle this is to first atomically read and
686 * exchange a new raw count - then add that new-prev delta
687 * count to the generic event atomically:
690 prev_raw_count = atomic64_read(&hwc->prev_count);
691 rdmsrl(hwc->event_base + idx, new_raw_count);
693 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
694 new_raw_count) != prev_raw_count)
698 * Now we have the new raw value and have updated the prev
699 * timestamp already. We can now calculate the elapsed delta
700 * (event-)time and add that to the generic event.
702 * Careful, not all hw sign-extends above the physical width
705 delta = (new_raw_count << shift) - (prev_raw_count << shift);
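/*
 * Shifting both counts up by (64 - event_bits) lets the subtraction
 * wrap correctly in 64-bit arithmetic even though the hardware
 * counter is narrower; the delta is then shifted back down by the
 * same amount before being accumulated.
 */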
708 atomic64_add(delta, &event->count);
709 atomic64_sub(delta, &hwc->period_left);
711 return new_raw_count;
714 static atomic_t active_events;
715 static DEFINE_MUTEX(pmc_reserve_mutex);
717 static bool reserve_pmc_hardware(void)
719 #ifdef CONFIG_X86_LOCAL_APIC
722 if (nmi_watchdog == NMI_LOCAL_APIC)
723 disable_lapic_nmi_watchdog();
725 for (i = 0; i < x86_pmu.num_events; i++) {
726 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
730 for (i = 0; i < x86_pmu.num_events; i++) {
731 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
738 #ifdef CONFIG_X86_LOCAL_APIC
740 for (i--; i >= 0; i--)
741 release_evntsel_nmi(x86_pmu.eventsel + i);
743 i = x86_pmu.num_events;
746 for (i--; i >= 0; i--)
747 release_perfctr_nmi(x86_pmu.perfctr + i);
749 if (nmi_watchdog == NMI_LOCAL_APIC)
750 enable_lapic_nmi_watchdog();
756 static void release_pmc_hardware(void)
758 #ifdef CONFIG_X86_LOCAL_APIC
761 for (i = 0; i < x86_pmu.num_events; i++) {
762 release_perfctr_nmi(x86_pmu.perfctr + i);
763 release_evntsel_nmi(x86_pmu.eventsel + i);
766 if (nmi_watchdog == NMI_LOCAL_APIC)
767 enable_lapic_nmi_watchdog();
771 static inline bool bts_available(void)
773 return x86_pmu.enable_bts != NULL;
776 static inline void init_debug_store_on_cpu(int cpu)
778 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
783 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
784 (u32)((u64)(unsigned long)ds),
785 (u32)((u64)(unsigned long)ds >> 32));
788 static inline void fini_debug_store_on_cpu(int cpu)
790 if (!per_cpu(cpu_hw_events, cpu).ds)
793 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
796 static void release_bts_hardware(void)
800 if (!bts_available())
805 for_each_online_cpu(cpu)
806 fini_debug_store_on_cpu(cpu);
808 for_each_possible_cpu(cpu) {
809 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
814 per_cpu(cpu_hw_events, cpu).ds = NULL;
816 kfree((void *)(unsigned long)ds->bts_buffer_base);
823 static int reserve_bts_hardware(void)
827 if (!bts_available())
832 for_each_possible_cpu(cpu) {
833 struct debug_store *ds;
837 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
838 if (unlikely(!buffer))
841 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
847 ds->bts_buffer_base = (u64)(unsigned long)buffer;
848 ds->bts_index = ds->bts_buffer_base;
849 ds->bts_absolute_maximum =
850 ds->bts_buffer_base + BTS_BUFFER_SIZE;
851 ds->bts_interrupt_threshold =
852 ds->bts_absolute_maximum - BTS_OVFL_TH;
854 per_cpu(cpu_hw_events, cpu).ds = ds;
859 release_bts_hardware();
861 for_each_online_cpu(cpu)
862 init_debug_store_on_cpu(cpu);
870 static void hw_perf_event_destroy(struct perf_event *event)
872 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
873 release_pmc_hardware();
874 release_bts_hardware();
875 mutex_unlock(&pmc_reserve_mutex);
879 static inline int x86_pmu_initialized(void)
881 return x86_pmu.handle_irq != NULL;
885 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
887 unsigned int cache_type, cache_op, cache_result;
890 config = attr->config;
892 cache_type = (config >> 0) & 0xff;
893 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
896 cache_op = (config >> 8) & 0xff;
897 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
900 cache_result = (config >> 16) & 0xff;
901 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
904 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
917 static void intel_pmu_enable_bts(u64 config)
919 unsigned long debugctlmsr;
921 debugctlmsr = get_debugctlmsr();
923 debugctlmsr |= X86_DEBUGCTL_TR;
924 debugctlmsr |= X86_DEBUGCTL_BTS;
925 debugctlmsr |= X86_DEBUGCTL_BTINT;
927 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
928 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
930 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
931 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
933 update_debugctlmsr(debugctlmsr);
936 static void intel_pmu_disable_bts(void)
938 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
939 unsigned long debugctlmsr;
944 debugctlmsr = get_debugctlmsr();
947 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
948 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
950 update_debugctlmsr(debugctlmsr);
954 * Set up the hardware configuration for a given attr_type
956 static int __hw_perf_event_init(struct perf_event *event)
958 struct perf_event_attr *attr = &event->attr;
959 struct hw_perf_event *hwc = &event->hw;
963 if (!x86_pmu_initialized())
967 if (!atomic_inc_not_zero(&active_events)) {
968 mutex_lock(&pmc_reserve_mutex);
969 if (atomic_read(&active_events) == 0) {
970 if (!reserve_pmc_hardware())
973 err = reserve_bts_hardware();
976 atomic_inc(&active_events);
977 mutex_unlock(&pmc_reserve_mutex);
982 event->destroy = hw_perf_event_destroy;
986 * (keep 'enabled' bit clear for now)
988 hwc->config = ARCH_PERFMON_EVENTSEL_INT;
993 * Count user and OS events unless requested not to.
995 if (!attr->exclude_user)
996 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
997 if (!attr->exclude_kernel)
998 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1000 if (!hwc->sample_period) {
1001 hwc->sample_period = x86_pmu.max_period;
1002 hwc->last_period = hwc->sample_period;
1003 atomic64_set(&hwc->period_left, hwc->sample_period);
1006 * If we have a PMU initialized but no APIC
1007 * interrupts, we cannot sample hardware
1008 * events (user-space has to fall back and
1009 * sample via an hrtimer-based software event):
1016 * A raw hw_event type provides the config in the hw_event structure
1018 if (attr->type == PERF_TYPE_RAW) {
1019 hwc->config |= x86_pmu.raw_event(attr->config);
1023 if (attr->type == PERF_TYPE_HW_CACHE)
1024 return set_ext_hw_attr(hwc, attr);
1026 if (attr->config >= x86_pmu.max_events)
1032 config = x86_pmu.event_map(attr->config);
1043 if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1044 (hwc->sample_period == 1)) {
1045 /* BTS is not supported by this architecture. */
1046 if (!bts_available())
1049 /* BTS is currently only allowed for user-mode. */
1050 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1054 hwc->config |= config;
1059 static void p6_pmu_disable_all(void)
1061 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1070 /* p6 only has one enable register */
1071 rdmsrl(MSR_P6_EVNTSEL0, val);
1072 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1073 wrmsrl(MSR_P6_EVNTSEL0, val);
1076 static void intel_pmu_disable_all(void)
1078 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1086 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1088 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1089 intel_pmu_disable_bts();
1092 static void amd_pmu_disable_all(void)
1094 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1102 * ensure we write the disable before we start disabling the
1103 * events proper, so that amd_pmu_enable_event() does the right thing.
1108 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1111 if (!test_bit(idx, cpuc->active_mask))
1113 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1114 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1116 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1117 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1121 void hw_perf_disable(void)
1123 if (!x86_pmu_initialized())
1125 return x86_pmu.disable_all();
1128 static void p6_pmu_enable_all(void)
1130 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1139 /* p6 only has one enable register */
1140 rdmsrl(MSR_P6_EVNTSEL0, val);
1141 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1142 wrmsrl(MSR_P6_EVNTSEL0, val);
1145 static void intel_pmu_enable_all(void)
1147 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1155 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1157 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1158 struct perf_event *event =
1159 cpuc->events[X86_PMC_IDX_FIXED_BTS];
1161 if (WARN_ON_ONCE(!event))
1164 intel_pmu_enable_bts(event->hw.config);
1168 static void amd_pmu_enable_all(void)
1170 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1179 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1180 struct perf_event *event = cpuc->events[idx];
1183 if (!test_bit(idx, cpuc->active_mask))
1186 val = event->hw.config;
1187 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1188 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1192 void hw_perf_enable(void)
1194 if (!x86_pmu_initialized())
1196 x86_pmu.enable_all();
1199 static inline u64 intel_pmu_get_status(void)
1203 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1208 static inline void intel_pmu_ack_status(u64 ack)
1210 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1213 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1215 (void)checking_wrmsrl(hwc->config_base + idx,
1216 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1219 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1221 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1225 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1227 int idx = __idx - X86_PMC_IDX_FIXED;
1230 mask = 0xfULL << (idx * 4);
1232 rdmsrl(hwc->config_base, ctrl_val);
1234 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1238 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1240 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1241 u64 val = P6_NOP_EVENT;
1244 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1246 (void)checking_wrmsrl(hwc->config_base + idx, val);
1250 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1252 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1253 intel_pmu_disable_bts();
1257 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1258 intel_pmu_disable_fixed(hwc, idx);
1262 x86_pmu_disable_event(hwc, idx);
1266 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1268 x86_pmu_disable_event(hwc, idx);
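/*
 * Per-CPU record of the period last written to each generic counter,
 * reported by perf_event_print_debug() below.
 */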
1271 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1274 * Set the next IRQ period, based on the hwc->period_left value.
1275 * To be called with the event disabled in hw:
1278 x86_perf_event_set_period(struct perf_event *event,
1279 struct hw_perf_event *hwc, int idx)
1281 s64 left = atomic64_read(&hwc->period_left);
1282 s64 period = hwc->sample_period;
1285 if (idx == X86_PMC_IDX_FIXED_BTS)
1289 * If we are way outside a reasonable range then just skip forward:
1291 if (unlikely(left <= -period)) {
1293 atomic64_set(&hwc->period_left, left);
1294 hwc->last_period = period;
1298 if (unlikely(left <= 0)) {
1300 atomic64_set(&hwc->period_left, left);
1301 hwc->last_period = period;
1305 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1307 if (unlikely(left < 2))
1310 if (left > x86_pmu.max_period)
1311 left = x86_pmu.max_period;
1313 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1316 * The hw event starts counting from this event offset;
1317 * mark it to be able to extract future deltas:
1319 atomic64_set(&hwc->prev_count, (u64)-left);
1321 err = checking_wrmsrl(hwc->event_base + idx,
1322 (u64)(-left) & x86_pmu.event_mask);
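/*
 * Example: with a sample period of 100000, the counter is armed with
 * (-100000 & event_mask), so it overflows, and the PMU interrupt
 * fires, after 100000 increments.
 */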
1324 perf_event_update_userpage(event);
1330 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1332 int idx = __idx - X86_PMC_IDX_FIXED;
1333 u64 ctrl_val, bits, mask;
1337 * Enable IRQ generation (0x8),
1338 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1342 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1344 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1347 mask = 0xfULL << (idx * 4);
1349 rdmsrl(hwc->config_base, ctrl_val);
1352 err = checking_wrmsrl(hwc->config_base, ctrl_val);
1355 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1357 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1362 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1364 (void)checking_wrmsrl(hwc->config_base + idx, val);
1368 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1370 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1371 if (!__get_cpu_var(cpu_hw_events).enabled)
1374 intel_pmu_enable_bts(hwc->config);
1378 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1379 intel_pmu_enable_fixed(hwc, idx);
1383 x86_pmu_enable_event(hwc, idx);
1386 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1388 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1391 x86_pmu_enable_event(hwc, idx);
1394 static int fixed_mode_idx(struct hw_perf_event *hwc)
1396 unsigned int hw_event;
1398 hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
1400 if (unlikely((hw_event ==
1401 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
1402 (hwc->sample_period == 1)))
1403 return X86_PMC_IDX_FIXED_BTS;
1405 if (!x86_pmu.num_events_fixed)
1409 * fixed counters do not take all possible filters
1411 if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
1414 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
1415 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
1416 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
1417 return X86_PMC_IDX_FIXED_CPU_CYCLES;
1418 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
1419 return X86_PMC_IDX_FIXED_BUS_CYCLES;
1425 * generic counter allocator: get next free counter
1428 gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
1432 idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
1433 return idx == x86_pmu.num_events ? -1 : idx;
1437 * intel-specific counter allocator: check event constraints
1440 intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
1442 const struct event_constraint *event_constraint;
1445 if (!event_constraints)
1448 code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
1450 for_each_event_constraint(event_constraint, event_constraints) {
1451 if (code == event_constraint->code) {
1452 for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
1453 if (!test_and_set_bit(i, cpuc->used_mask))
1460 return gen_get_event_idx(cpuc, hwc);
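/*
 * Pick a counter index for this event: a matching fixed-purpose
 * counter (or the BTS slot) is tried first, then a generic counter
 * via the per-vendor ->get_event_idx() allocator.
 */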
1464 x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
1468 idx = fixed_mode_idx(hwc);
1469 if (idx == X86_PMC_IDX_FIXED_BTS) {
1470 /* BTS is already occupied. */
1471 if (test_and_set_bit(idx, cpuc->used_mask))
1474 hwc->config_base = 0;
1475 hwc->event_base = 0;
1477 } else if (idx >= 0) {
1479 * Try to get the fixed event, if that is already taken
1480 * then try to get a generic event:
1482 if (test_and_set_bit(idx, cpuc->used_mask))
1485 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1487 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1488 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1491 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1495 /* Try to get the previous generic event again */
1496 if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
1498 idx = x86_pmu.get_event_idx(cpuc, hwc);
1502 set_bit(idx, cpuc->used_mask);
1505 hwc->config_base = x86_pmu.eventsel;
1506 hwc->event_base = x86_pmu.perfctr;
1513 * Find a PMC slot for the freshly enabled / scheduled in event:
1515 static int x86_pmu_enable(struct perf_event *event)
1517 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1518 struct hw_perf_event *hwc = &event->hw;
1521 idx = x86_schedule_event(cpuc, hwc);
1525 perf_events_lapic_init();
1527 x86_pmu.disable(hwc, idx);
1529 cpuc->events[idx] = event;
1530 set_bit(idx, cpuc->active_mask);
1532 x86_perf_event_set_period(event, hwc, idx);
1533 x86_pmu.enable(hwc, idx);
1535 perf_event_update_userpage(event);
1540 static void x86_pmu_unthrottle(struct perf_event *event)
1542 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1543 struct hw_perf_event *hwc = &event->hw;
1545 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1546 cpuc->events[hwc->idx] != event))
1549 x86_pmu.enable(hwc, hwc->idx);
1552 void perf_event_print_debug(void)
1554 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1555 struct cpu_hw_events *cpuc;
1556 unsigned long flags;
1559 if (!x86_pmu.num_events)
1562 local_irq_save(flags);
1564 cpu = smp_processor_id();
1565 cpuc = &per_cpu(cpu_hw_events, cpu);
1567 if (x86_pmu.version >= 2) {
1568 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1569 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1570 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1571 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1574 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1575 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1576 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1577 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
1579 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
1581 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1582 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1583 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1585 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1587 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
1588 cpu, idx, pmc_ctrl);
1589 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
1590 cpu, idx, pmc_count);
1591 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
1592 cpu, idx, prev_left);
1594 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1595 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1597 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1598 cpu, idx, pmc_count);
1600 local_irq_restore(flags);
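/*
 * Flush the per-CPU BTS buffer: each hardware branch record between
 * bts_buffer_base and bts_index is turned into a synthesized perf
 * sample on the BTS event, then the buffer index is reset.
 */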
1603 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1605 struct debug_store *ds = cpuc->ds;
1611 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1612 struct bts_record *at, *top;
1613 struct perf_output_handle handle;
1614 struct perf_event_header header;
1615 struct perf_sample_data data;
1616 struct pt_regs regs;
1624 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1625 top = (struct bts_record *)(unsigned long)ds->bts_index;
1630 ds->bts_index = ds->bts_buffer_base;
1633 data.period = event->hw.last_period;
1639 * Prepare a generic sample, i.e. fill in the invariant fields.
1640 * We will overwrite the from and to address before we output the sample.
1643 perf_prepare_sample(&header, &data, event, ®s);
1645 if (perf_output_begin(&handle, event,
1646 header.size * (top - at), 1, 1))
1649 for (; at < top; at++) {
1653 perf_output_sample(&handle, &header, &data, event);
1656 perf_output_end(&handle);
1658 /* There's new data available. */
1659 event->hw.interrupts++;
1660 event->pending_kill = POLL_IN;
1663 static void x86_pmu_disable(struct perf_event *event)
1665 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1666 struct hw_perf_event *hwc = &event->hw;
1670 * Must be done before we disable, otherwise the nmi handler
1671 * could reenable again:
1673 clear_bit(idx, cpuc->active_mask);
1674 x86_pmu.disable(hwc, idx);
1677 * Make sure the cleared pointer becomes visible before we
1678 * (potentially) free the event:
1683 * Drain the remaining delta count out of an event
1684 * that we are disabling:
1686 x86_perf_event_update(event, hwc, idx);
1688 /* Drain the remaining BTS records. */
1689 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1690 intel_pmu_drain_bts_buffer(cpuc);
1692 cpuc->events[idx] = NULL;
1693 clear_bit(idx, cpuc->used_mask);
1695 perf_event_update_userpage(event);
1699 * Save and restart an expired event. Called by NMI contexts,
1700 * so it has to be careful about preempting normal event ops:
1702 static int intel_pmu_save_and_restart(struct perf_event *event)
1704 struct hw_perf_event *hwc = &event->hw;
1708 x86_perf_event_update(event, hwc, idx);
1709 ret = x86_perf_event_set_period(event, hwc, idx);
1711 if (event->state == PERF_EVENT_STATE_ACTIVE)
1712 intel_pmu_enable_event(hwc, idx);
1717 static void intel_pmu_reset(void)
1719 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1720 unsigned long flags;
1723 if (!x86_pmu.num_events)
1726 local_irq_save(flags);
1728 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1730 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1731 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1732 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1734 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1735 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1738 ds->bts_index = ds->bts_buffer_base;
1740 local_irq_restore(flags);
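/*
 * P6-style PMUs have no global status register, so the interrupt
 * handler polls every active counter and treats a counter whose top
 * bit has cleared (i.e. it wrapped past zero) as having overflowed.
 */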
1743 static int p6_pmu_handle_irq(struct pt_regs *regs)
1745 struct perf_sample_data data;
1746 struct cpu_hw_events *cpuc;
1747 struct perf_event *event;
1748 struct hw_perf_event *hwc;
1749 int idx, handled = 0;
1755 cpuc = &__get_cpu_var(cpu_hw_events);
1757 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1758 if (!test_bit(idx, cpuc->active_mask))
1761 event = cpuc->events[idx];
1764 val = x86_perf_event_update(event, hwc, idx);
1765 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1772 data.period = event->hw.last_period;
1774 if (!x86_perf_event_set_period(event, hwc, idx))
1777 if (perf_event_overflow(event, 1, &data, regs))
1778 p6_pmu_disable_event(hwc, idx);
1782 inc_irq_stat(apic_perf_irqs);
1788 * This handler is triggered by the local APIC, so the APIC IRQ handling rules apply:
1791 static int intel_pmu_handle_irq(struct pt_regs *regs)
1793 struct perf_sample_data data;
1794 struct cpu_hw_events *cpuc;
1801 cpuc = &__get_cpu_var(cpu_hw_events);
1804 intel_pmu_drain_bts_buffer(cpuc);
1805 status = intel_pmu_get_status();
1813 if (++loops > 100) {
1814 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1815 perf_event_print_debug();
1821 inc_irq_stat(apic_perf_irqs);
1823 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1824 struct perf_event *event = cpuc->events[bit];
1826 clear_bit(bit, (unsigned long *) &status);
1827 if (!test_bit(bit, cpuc->active_mask))
1830 if (!intel_pmu_save_and_restart(event))
1833 data.period = event->hw.last_period;
1835 if (perf_event_overflow(event, 1, &data, regs))
1836 intel_pmu_disable_event(&event->hw, bit);
1839 intel_pmu_ack_status(ack);
1842 * Repeat if there is more work to be done:
1844 status = intel_pmu_get_status();
1853 static int amd_pmu_handle_irq(struct pt_regs *regs)
1855 struct perf_sample_data data;
1856 struct cpu_hw_events *cpuc;
1857 struct perf_event *event;
1858 struct hw_perf_event *hwc;
1859 int idx, handled = 0;
1865 cpuc = &__get_cpu_var(cpu_hw_events);
1867 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1868 if (!test_bit(idx, cpuc->active_mask))
1871 event = cpuc->events[idx];
1874 val = x86_perf_event_update(event, hwc, idx);
1875 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1882 data.period = event->hw.last_period;
1884 if (!x86_perf_event_set_period(event, hwc, idx))
1887 if (perf_event_overflow(event, 1, &data, regs))
1888 amd_pmu_disable_event(hwc, idx);
1892 inc_irq_stat(apic_perf_irqs);
1897 void smp_perf_pending_interrupt(struct pt_regs *regs)
1901 inc_irq_stat(apic_pending_irqs);
1902 perf_event_do_pending();
1906 void set_perf_event_pending(void)
1908 #ifdef CONFIG_X86_LOCAL_APIC
1909 if (!x86_pmu.apic || !x86_pmu_initialized())
1912 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
1916 void perf_events_lapic_init(void)
1918 #ifdef CONFIG_X86_LOCAL_APIC
1919 if (!x86_pmu.apic || !x86_pmu_initialized())
1923 * Always use NMI for PMU
1925 apic_write(APIC_LVTPC, APIC_DM_NMI);
1929 static int __kprobes
1930 perf_event_nmi_handler(struct notifier_block *self,
1931 unsigned long cmd, void *__args)
1933 struct die_args *args = __args;
1934 struct pt_regs *regs;
1936 if (!atomic_read(&active_events))
1950 #ifdef CONFIG_X86_LOCAL_APIC
1951 apic_write(APIC_LVTPC, APIC_DM_NMI);
1954 * Can't rely on the handled return value to say it was our NMI, two
1955 * events could trigger 'simultaneously' raising two back-to-back NMIs.
1957 * If the first NMI handles both, the latter will be empty and daze the CPU.
1960 x86_pmu.handle_irq(regs);
1965 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1966 .notifier_call = perf_event_nmi_handler,
1971 static __initconst struct x86_pmu p6_pmu = {
1973 .handle_irq = p6_pmu_handle_irq,
1974 .disable_all = p6_pmu_disable_all,
1975 .enable_all = p6_pmu_enable_all,
1976 .enable = p6_pmu_enable_event,
1977 .disable = p6_pmu_disable_event,
1978 .eventsel = MSR_P6_EVNTSEL0,
1979 .perfctr = MSR_P6_PERFCTR0,
1980 .event_map = p6_pmu_event_map,
1981 .raw_event = p6_pmu_raw_event,
1982 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
1984 .max_period = (1ULL << 31) - 1,
1988 * Events have 40 bits implemented. However, they are designed such
1989 * that bits [32-39] are sign extensions of bit 31. As such, the
1990 * effective width of an event for a P6-like PMU is 32 bits only.
1992 * See the IA-32 Intel Architecture Software Developer's Manual, Vol 3B.
1995 .event_mask = (1ULL << 32) - 1,
1996 .get_event_idx = intel_get_event_idx,
1999 static __initconst struct x86_pmu intel_pmu = {
2001 .handle_irq = intel_pmu_handle_irq,
2002 .disable_all = intel_pmu_disable_all,
2003 .enable_all = intel_pmu_enable_all,
2004 .enable = intel_pmu_enable_event,
2005 .disable = intel_pmu_disable_event,
2006 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2007 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2008 .event_map = intel_pmu_event_map,
2009 .raw_event = intel_pmu_raw_event,
2010 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2013 * Intel PMCs cannot be accessed sanely above 32-bit width,
2014 * so we install an artificial 1<<31 period regardless of
2015 * the generic event period:
2017 .max_period = (1ULL << 31) - 1,
2018 .enable_bts = intel_pmu_enable_bts,
2019 .disable_bts = intel_pmu_disable_bts,
2020 .get_event_idx = intel_get_event_idx,
2023 static __initconst struct x86_pmu amd_pmu = {
2025 .handle_irq = amd_pmu_handle_irq,
2026 .disable_all = amd_pmu_disable_all,
2027 .enable_all = amd_pmu_enable_all,
2028 .enable = amd_pmu_enable_event,
2029 .disable = amd_pmu_disable_event,
2030 .eventsel = MSR_K7_EVNTSEL0,
2031 .perfctr = MSR_K7_PERFCTR0,
2032 .event_map = amd_pmu_event_map,
2033 .raw_event = amd_pmu_raw_event,
2034 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
2037 .event_mask = (1ULL << 48) - 1,
2039 /* use highest bit to detect overflow */
2040 .max_period = (1ULL << 47) - 1,
2041 .get_event_idx = gen_get_event_idx,
2044 static __init int p6_pmu_init(void)
2046 switch (boot_cpu_data.x86_model) {
2048 case 3: /* Pentium Pro */
2050 case 6: /* Pentium II */
2053 case 11: /* Pentium III */
2054 event_constraints = intel_p6_event_constraints;
2059 event_constraints = intel_p6_event_constraints;
2062 pr_cont("unsupported p6 CPU model %d ",
2063 boot_cpu_data.x86_model);
2072 static __init int intel_pmu_init(void)
2074 union cpuid10_edx edx;
2075 union cpuid10_eax eax;
2076 unsigned int unused;
2080 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2081 /* check for P6 processor family */
2082 if (boot_cpu_data.x86 == 6) {
2083 return p6_pmu_init();
2090 * Check whether the Architectural PerfMon supports
2091 * Branch Misses Retired hw_event or not.
2093 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2094 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2097 version = eax.split.version_id;
2101 x86_pmu = intel_pmu;
2102 x86_pmu.version = version;
2103 x86_pmu.num_events = eax.split.num_events;
2104 x86_pmu.event_bits = eax.split.bit_width;
2105 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
2108 * Quirk: v2 perfmon does not report fixed-purpose events, so
2109 * assume at least 3 events:
2111 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
2114 * Install the hw-cache-events table:
2116 switch (boot_cpu_data.x86_model) {
2117 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2118 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2119 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2120 case 29: /* six-core 45 nm xeon "Dunnington" */
2121 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2122 sizeof(hw_cache_event_ids));
2124 pr_cont("Core2 events, ");
2125 event_constraints = intel_core_event_constraints;
2129 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2130 sizeof(hw_cache_event_ids));
2132 event_constraints = intel_nehalem_event_constraints;
2133 pr_cont("Nehalem/Corei7 events, ");
2136 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2137 sizeof(hw_cache_event_ids));
2139 pr_cont("Atom events, ");
2145 static __init int amd_pmu_init(void)
2147 /* Performance-monitoring supported from K7 and later: */
2148 if (boot_cpu_data.x86 < 6)
2153 /* Events are common for all AMDs */
2154 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2155 sizeof(hw_cache_event_ids));
2160 static void __init pmu_check_apic(void)
2166 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2167 pr_info("no hardware sampling interrupt available.\n");
2170 void __init init_hw_perf_events(void)
2174 pr_info("Performance Events: ");
2176 switch (boot_cpu_data.x86_vendor) {
2177 case X86_VENDOR_INTEL:
2178 err = intel_pmu_init();
2180 case X86_VENDOR_AMD:
2181 err = amd_pmu_init();
2187 pr_cont("no PMU driver, software events only.\n");
2193 pr_cont("%s PMU driver.\n", x86_pmu.name);
2195 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2196 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2197 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2198 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2200 perf_event_mask = (1 << x86_pmu.num_events) - 1;
2201 perf_max_events = x86_pmu.num_events;
2203 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2204 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2205 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2206 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2210 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2211 x86_pmu.intel_ctrl = perf_event_mask;
2213 perf_events_lapic_init();
2214 register_die_notifier(&perf_event_nmi_notifier);
2216 pr_info("... version: %d\n", x86_pmu.version);
2217 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2218 pr_info("... generic registers: %d\n", x86_pmu.num_events);
2219 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2220 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2221 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2222 pr_info("... event mask: %016Lx\n", perf_event_mask);
2225 static inline void x86_pmu_read(struct perf_event *event)
2227 x86_perf_event_update(event, &event->hw, event->hw.idx);
2230 static const struct pmu pmu = {
2231 .enable = x86_pmu_enable,
2232 .disable = x86_pmu_disable,
2233 .read = x86_pmu_read,
2234 .unthrottle = x86_pmu_unthrottle,
2238 validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
2240 struct hw_perf_event fake_event = event->hw;
2242 if (event->pmu && event->pmu != &pmu)
2245 return x86_schedule_event(cpuc, &fake_event) >= 0;
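/*
 * Dry-run the whole group against an empty, fake cpu_hw_events to
 * verify that the leader, its siblings and the new event can all be
 * scheduled on counters at the same time.
 */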
2248 static int validate_group(struct perf_event *event)
2250 struct perf_event *sibling, *leader = event->group_leader;
2251 struct cpu_hw_events fake_pmu;
2253 memset(&fake_pmu, 0, sizeof(fake_pmu));
2255 if (!validate_event(&fake_pmu, leader))
2258 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
2259 if (!validate_event(&fake_pmu, sibling))
2263 if (!validate_event(&fake_pmu, event))
2269 const struct pmu *hw_perf_event_init(struct perf_event *event)
2273 err = __hw_perf_event_init(event);
2275 if (event->group_leader != event)
2276 err = validate_group(event);
2280 event->destroy(event);
2281 return ERR_PTR(err);
2292 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2294 if (entry->nr < PERF_MAX_STACK_DEPTH)
2295 entry->ip[entry->nr++] = ip;
2298 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2299 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2300 static DEFINE_PER_CPU(int, in_ignored_frame);
2304 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2306 /* Ignore warnings */
2309 static void backtrace_warning(void *data, char *msg)
2311 /* Ignore warnings */
2314 static int backtrace_stack(void *data, char *name)
2316 per_cpu(in_ignored_frame, smp_processor_id()) =
2317 x86_is_stack_id(NMI_STACK, name) ||
2318 x86_is_stack_id(DEBUG_STACK, name);
2323 static void backtrace_address(void *data, unsigned long addr, int reliable)
2325 struct perf_callchain_entry *entry = data;
2327 if (per_cpu(in_ignored_frame, smp_processor_id()))
2331 callchain_store(entry, addr);
2334 static const struct stacktrace_ops backtrace_ops = {
2335 .warning = backtrace_warning,
2336 .warning_symbol = backtrace_warning_symbol,
2337 .stack = backtrace_stack,
2338 .address = backtrace_address,
2339 .walk_stack = print_context_stack_bp,
2342 #include "../dumpstack.h"
2345 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2347 callchain_store(entry, PERF_CONTEXT_KERNEL);
2348 callchain_store(entry, regs->ip);
2350 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
2354 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2356 static unsigned long
2357 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2359 unsigned long offset, addr = (unsigned long)from;
2360 int type = in_nmi() ? KM_NMI : KM_IRQ0;
2361 unsigned long size, len = 0;
2367 ret = __get_user_pages_fast(addr, 1, 0, &page);
2371 offset = addr & (PAGE_SIZE - 1);
2372 size = min(PAGE_SIZE - offset, n - len);
2374 map = kmap_atomic(page, type);
2375 memcpy(to, map+offset, size);
2376 kunmap_atomic(map, type);
2388 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2390 unsigned long bytes;
2392 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2394 return bytes == sizeof(*frame);
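/*
 * Walk the user-space stack by following saved frame pointers,
 * copying each stack_frame with the NMI-safe copy helper above and
 * recording the return addresses into the callchain entry.
 */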
2398 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2400 struct stack_frame frame;
2401 const void __user *fp;
2403 if (!user_mode(regs))
2404 regs = task_pt_regs(current);
2406 fp = (void __user *)regs->bp;
2408 callchain_store(entry, PERF_CONTEXT_USER);
2409 callchain_store(entry, regs->ip);
2411 while (entry->nr < PERF_MAX_STACK_DEPTH) {
2412 frame.next_frame = NULL;
2413 frame.return_address = 0;
2415 if (!copy_stack_frame(fp, &frame))
2418 if ((unsigned long)fp < regs->sp)
2421 callchain_store(entry, frame.return_address);
2422 fp = frame.next_frame;
2427 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2434 is_user = user_mode(regs);
2436 if (!current || current->pid == 0)
2439 if (is_user && current->state != TASK_RUNNING)
2443 perf_callchain_kernel(regs, entry);
2446 perf_callchain_user(regs, entry);
2449 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2451 struct perf_callchain_entry *entry;
2454 entry = &__get_cpu_var(pmc_nmi_entry);
2456 entry = &__get_cpu_var(pmc_irq_entry);
2460 perf_do_callchain(regs, entry);
2465 void hw_perf_event_setup_online(int cpu)
2467 init_debug_store_on_cpu(cpu);