2 * Performance events x86 architecture code
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
11 * For licensing details see kernel-base/COPYING
14 #include <linux/perf_event.h>
15 #include <linux/capability.h>
16 #include <linux/notifier.h>
17 #include <linux/hardirq.h>
18 #include <linux/kprobes.h>
19 #include <linux/module.h>
20 #include <linux/kdebug.h>
21 #include <linux/sched.h>
22 #include <linux/uaccess.h>
23 #include <linux/highmem.h>
24 #include <linux/cpu.h>
27 #include <asm/stacktrace.h>
30 static u64 perf_event_mask __read_mostly;
32 /* The maximal number of PEBS events: */
33 #define MAX_PEBS_EVENTS 4
35 /* The size of a BTS record in bytes: */
36 #define BTS_RECORD_SIZE 24
38 /* The size of a per-cpu BTS buffer in bytes: */
39 #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048)
41 /* The BTS overflow threshold in bytes from the end of the buffer: */
42 #define BTS_OVFL_TH (BTS_RECORD_SIZE * 128)
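/*
 * Illustrative sketch, not part of the original file: on the 64-bit
 * debug-store layout assumed here, one BTS record is three 64-bit
 * fields (branch-from address, branch-to address, flags), which is
 * where the 24-byte BTS_RECORD_SIZE above comes from.  The struct
 * name below is made up for illustration only.
 */
struct example_bts_record {
	u64	from;
	u64	to;
	u64	flags;
};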
46 * Bits in the debugctlmsr controlling branch tracing.
48 #define X86_DEBUGCTL_TR (1 << 6)
49 #define X86_DEBUGCTL_BTS (1 << 7)
50 #define X86_DEBUGCTL_BTINT (1 << 8)
51 #define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
52 #define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
55 * A debug store configuration.
57 * We only support architectures that use 64bit fields.
62 u64 bts_absolute_maximum;
63 u64 bts_interrupt_threshold;
66 u64 pebs_absolute_maximum;
67 u64 pebs_interrupt_threshold;
68 u64 pebs_event_reset[MAX_PEBS_EVENTS];
71 struct cpu_hw_events {
72 struct perf_event *events[X86_PMC_IDX_MAX];
73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
75 unsigned long interrupts;
77 struct debug_store *ds;
80 struct event_constraint {
81 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
85 #define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
86 #define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 }
88 #define for_each_event_constraint(e, c) \
89 for ((e) = (c); (e)->idxmsk[0]; (e)++)
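/*
 * Illustrative example (an interpretation, not original code): an entry
 * such as EVENT_CONSTRAINT(0xc1, 0x1) says that event code 0xc1 (FLOPS
 * on P6) may only be scheduled on the generic counters whose index bits
 * are set in the mask -- 0x1 meaning PMC0 only, 0x3 meaning PMC0 or
 * PMC1.  intel_get_event_idx() below walks such a table with
 * for_each_event_constraint() before falling back to the generic
 * counter allocator.
 */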
93 * struct x86_pmu - generic x86 pmu
98 int (*handle_irq)(struct pt_regs *);
99 void (*disable_all)(void);
100 void (*enable_all)(void);
101 void (*enable)(struct hw_perf_event *, int);
102 void (*disable)(struct hw_perf_event *, int);
105 u64 (*event_map)(int);
106 u64 (*raw_event)(u64);
109 int num_events_fixed;
115 void (*enable_bts)(u64 config);
116 void (*disable_bts)(void);
117 int (*get_event_idx)(struct cpu_hw_events *cpuc,
118 struct hw_perf_event *hwc);
121 static struct x86_pmu x86_pmu __read_mostly;
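/*
 * Illustrative note (interpretation, not original text): struct x86_pmu
 * acts as a vtable -- the vendor-specific descriptors (p6_pmu,
 * intel_pmu, amd_pmu further down) are copied into this single
 * __read_mostly instance at init time, and the generic code only ever
 * calls through x86_pmu.handle_irq(), x86_pmu.enable() and friends.
 */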
123 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
127 static const struct event_constraint *event_constraints;
130 * Not sure about some of these
132 static const u64 p6_perfmon_event_map[] =
134 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
135 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
136 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
137 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
138 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
139 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
140 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
143 static u64 p6_pmu_event_map(int hw_event)
145 return p6_perfmon_event_map[hw_event];
149 * Event setting that is specified not to count anything.
150 * We use this to effectively disable a counter.
152 * L2_RQSTS with 0 MESI unit mask.
154 #define P6_NOP_EVENT 0x0000002EULL
156 static u64 p6_pmu_raw_event(u64 hw_event)
158 #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
159 #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
160 #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
161 #define P6_EVNTSEL_INV_MASK 0x00800000ULL
162 #define P6_EVNTSEL_REG_MASK 0xFF000000ULL
164 #define P6_EVNTSEL_MASK \
165 (P6_EVNTSEL_EVENT_MASK | \
166 P6_EVNTSEL_UNIT_MASK | \
167 P6_EVNTSEL_EDGE_MASK | \
168 P6_EVNTSEL_INV_MASK | \
169 P6_EVNTSEL_REG_MASK)
171 return hw_event & P6_EVNTSEL_MASK;
174 static const struct event_constraint intel_p6_event_constraints[] =
176 EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
177 EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
178 EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
179 EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
180 EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
181 EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
186 * Intel PerfMon v3. Used on Core2 and later.
188 static const u64 intel_perfmon_event_map[] =
190 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
191 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
192 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
193 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
194 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
195 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
196 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
199 static const struct event_constraint intel_core_event_constraints[] =
201 EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
202 EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
203 EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
204 EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
205 EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
206 EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
207 EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
208 EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
209 EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
213 static const struct event_constraint intel_nehalem_event_constraints[] =
215 EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
216 EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
217 EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
218 EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
219 EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
220 EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
221 EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
222 EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
223 EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
224 EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
228 static u64 intel_pmu_event_map(int hw_event)
230 return intel_perfmon_event_map[hw_event];
234 * Generalized hw caching related hw_event table, filled
235 * in on a per model basis. A value of 0 means
236 * 'not supported', -1 means 'hw_event makes no sense on
237 * this CPU', any other value means the raw hw_event ID.
241 #define C(x) PERF_COUNT_HW_CACHE_##x
243 static u64 __read_mostly hw_cache_event_ids
244 [PERF_COUNT_HW_CACHE_MAX]
245 [PERF_COUNT_HW_CACHE_OP_MAX]
246 [PERF_COUNT_HW_CACHE_RESULT_MAX];
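/*
 * Illustrative sketch (the helper name is made up, not part of the
 * original file): a PERF_TYPE_HW_CACHE config packs the (cache, op,
 * result) triple into its low three bytes; set_ext_hw_attr() below
 * unpacks it the same way and looks the code up in
 * hw_cache_event_ids[][][].
 */
static inline u64 example_hw_cache_config(u8 cache, u8 op, u8 result)
{
	/* e.g. L1D read misses: C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) */
	return (u64)cache | ((u64)op << 8) | ((u64)result << 16);
}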
248 static const u64 westmere_hw_cache_event_ids
249 [PERF_COUNT_HW_CACHE_MAX]
250 [PERF_COUNT_HW_CACHE_OP_MAX]
251 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
255 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
256 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
259 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
260 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
262 [ C(OP_PREFETCH) ] = {
263 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
264 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
269 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
270 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
273 [ C(RESULT_ACCESS) ] = -1,
274 [ C(RESULT_MISS) ] = -1,
276 [ C(OP_PREFETCH) ] = {
277 [ C(RESULT_ACCESS) ] = 0x0,
278 [ C(RESULT_MISS) ] = 0x0,
283 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
284 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
287 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
288 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
290 [ C(OP_PREFETCH) ] = {
291 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
292 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
297 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
298 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
301 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
302 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
304 [ C(OP_PREFETCH) ] = {
305 [ C(RESULT_ACCESS) ] = 0x0,
306 [ C(RESULT_MISS) ] = 0x0,
311 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
312 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
315 [ C(RESULT_ACCESS) ] = -1,
316 [ C(RESULT_MISS) ] = -1,
318 [ C(OP_PREFETCH) ] = {
319 [ C(RESULT_ACCESS) ] = -1,
320 [ C(RESULT_MISS) ] = -1,
325 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
326 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
329 [ C(RESULT_ACCESS) ] = -1,
330 [ C(RESULT_MISS) ] = -1,
332 [ C(OP_PREFETCH) ] = {
333 [ C(RESULT_ACCESS) ] = -1,
334 [ C(RESULT_MISS) ] = -1,
339 static __initconst u64 nehalem_hw_cache_event_ids
340 [PERF_COUNT_HW_CACHE_MAX]
341 [PERF_COUNT_HW_CACHE_OP_MAX]
342 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
346 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
347 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
350 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
351 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
353 [ C(OP_PREFETCH) ] = {
354 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
355 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
360 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
361 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
364 [ C(RESULT_ACCESS) ] = -1,
365 [ C(RESULT_MISS) ] = -1,
367 [ C(OP_PREFETCH) ] = {
368 [ C(RESULT_ACCESS) ] = 0x0,
369 [ C(RESULT_MISS) ] = 0x0,
374 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
375 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
378 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
379 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
381 [ C(OP_PREFETCH) ] = {
382 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
383 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
388 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
389 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
392 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
393 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
395 [ C(OP_PREFETCH) ] = {
396 [ C(RESULT_ACCESS) ] = 0x0,
397 [ C(RESULT_MISS) ] = 0x0,
402 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
403 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
406 [ C(RESULT_ACCESS) ] = -1,
407 [ C(RESULT_MISS) ] = -1,
409 [ C(OP_PREFETCH) ] = {
410 [ C(RESULT_ACCESS) ] = -1,
411 [ C(RESULT_MISS) ] = -1,
416 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
417 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
420 [ C(RESULT_ACCESS) ] = -1,
421 [ C(RESULT_MISS) ] = -1,
423 [ C(OP_PREFETCH) ] = {
424 [ C(RESULT_ACCESS) ] = -1,
425 [ C(RESULT_MISS) ] = -1,
430 static __initconst u64 core2_hw_cache_event_ids
431 [PERF_COUNT_HW_CACHE_MAX]
432 [PERF_COUNT_HW_CACHE_OP_MAX]
433 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
437 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
438 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
441 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
442 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
444 [ C(OP_PREFETCH) ] = {
445 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
446 [ C(RESULT_MISS) ] = 0,
451 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
452 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
455 [ C(RESULT_ACCESS) ] = -1,
456 [ C(RESULT_MISS) ] = -1,
458 [ C(OP_PREFETCH) ] = {
459 [ C(RESULT_ACCESS) ] = 0,
460 [ C(RESULT_MISS) ] = 0,
465 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
466 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
469 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
470 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
472 [ C(OP_PREFETCH) ] = {
473 [ C(RESULT_ACCESS) ] = 0,
474 [ C(RESULT_MISS) ] = 0,
479 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
480 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
483 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
484 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
486 [ C(OP_PREFETCH) ] = {
487 [ C(RESULT_ACCESS) ] = 0,
488 [ C(RESULT_MISS) ] = 0,
493 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
494 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
497 [ C(RESULT_ACCESS) ] = -1,
498 [ C(RESULT_MISS) ] = -1,
500 [ C(OP_PREFETCH) ] = {
501 [ C(RESULT_ACCESS) ] = -1,
502 [ C(RESULT_MISS) ] = -1,
507 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
508 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
511 [ C(RESULT_ACCESS) ] = -1,
512 [ C(RESULT_MISS) ] = -1,
514 [ C(OP_PREFETCH) ] = {
515 [ C(RESULT_ACCESS) ] = -1,
516 [ C(RESULT_MISS) ] = -1,
521 static __initconst u64 atom_hw_cache_event_ids
522 [PERF_COUNT_HW_CACHE_MAX]
523 [PERF_COUNT_HW_CACHE_OP_MAX]
524 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
528 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
529 [ C(RESULT_MISS) ] = 0,
532 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
533 [ C(RESULT_MISS) ] = 0,
535 [ C(OP_PREFETCH) ] = {
536 [ C(RESULT_ACCESS) ] = 0x0,
537 [ C(RESULT_MISS) ] = 0,
542 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
543 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
546 [ C(RESULT_ACCESS) ] = -1,
547 [ C(RESULT_MISS) ] = -1,
549 [ C(OP_PREFETCH) ] = {
550 [ C(RESULT_ACCESS) ] = 0,
551 [ C(RESULT_MISS) ] = 0,
556 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
557 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
560 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
561 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
563 [ C(OP_PREFETCH) ] = {
564 [ C(RESULT_ACCESS) ] = 0,
565 [ C(RESULT_MISS) ] = 0,
570 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
571 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
574 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
575 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
577 [ C(OP_PREFETCH) ] = {
578 [ C(RESULT_ACCESS) ] = 0,
579 [ C(RESULT_MISS) ] = 0,
584 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
585 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
588 [ C(RESULT_ACCESS) ] = -1,
589 [ C(RESULT_MISS) ] = -1,
591 [ C(OP_PREFETCH) ] = {
592 [ C(RESULT_ACCESS) ] = -1,
593 [ C(RESULT_MISS) ] = -1,
598 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
599 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
602 [ C(RESULT_ACCESS) ] = -1,
603 [ C(RESULT_MISS) ] = -1,
605 [ C(OP_PREFETCH) ] = {
606 [ C(RESULT_ACCESS) ] = -1,
607 [ C(RESULT_MISS) ] = -1,
612 static u64 intel_pmu_raw_event(u64 hw_event)
614 #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
615 #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
616 #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
617 #define CORE_EVNTSEL_INV_MASK 0x00800000ULL
618 #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
620 #define CORE_EVNTSEL_MASK \
621 (CORE_EVNTSEL_EVENT_MASK | \
622 CORE_EVNTSEL_UNIT_MASK | \
623 CORE_EVNTSEL_EDGE_MASK | \
624 CORE_EVNTSEL_INV_MASK | \
625 CORE_EVNTSEL_REG_MASK)
627 return hw_event & CORE_EVNTSEL_MASK;
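/*
 * Illustrative note (assumption about usage, not original code): a raw
 * config supplied by user space carries the eventsel fields directly --
 * bits 7:0 event select, bits 15:8 unit mask -- so 0x013c is event 0x3c
 * with unit mask 0x01, the encoding intel_perfmon_event_map above uses
 * for bus cycles.  Everything outside CORE_EVNTSEL_MASK, notably the
 * enable and INT bits, is masked off here and added back by the kernel
 * itself.
 */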
630 static __initconst u64 amd_hw_cache_event_ids
631 [PERF_COUNT_HW_CACHE_MAX]
632 [PERF_COUNT_HW_CACHE_OP_MAX]
633 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
637 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
638 [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
641 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
642 [ C(RESULT_MISS) ] = 0,
644 [ C(OP_PREFETCH) ] = {
645 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
646 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
651 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
652 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
655 [ C(RESULT_ACCESS) ] = -1,
656 [ C(RESULT_MISS) ] = -1,
658 [ C(OP_PREFETCH) ] = {
659 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
660 [ C(RESULT_MISS) ] = 0,
665 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
666 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
669 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
670 [ C(RESULT_MISS) ] = 0,
672 [ C(OP_PREFETCH) ] = {
673 [ C(RESULT_ACCESS) ] = 0,
674 [ C(RESULT_MISS) ] = 0,
679 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
680 [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
683 [ C(RESULT_ACCESS) ] = 0,
684 [ C(RESULT_MISS) ] = 0,
686 [ C(OP_PREFETCH) ] = {
687 [ C(RESULT_ACCESS) ] = 0,
688 [ C(RESULT_MISS) ] = 0,
693 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
694 [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
697 [ C(RESULT_ACCESS) ] = -1,
698 [ C(RESULT_MISS) ] = -1,
700 [ C(OP_PREFETCH) ] = {
701 [ C(RESULT_ACCESS) ] = -1,
702 [ C(RESULT_MISS) ] = -1,
707 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
708 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
711 [ C(RESULT_ACCESS) ] = -1,
712 [ C(RESULT_MISS) ] = -1,
714 [ C(OP_PREFETCH) ] = {
715 [ C(RESULT_ACCESS) ] = -1,
716 [ C(RESULT_MISS) ] = -1,
722 * AMD Performance Monitor K7 and later.
724 static const u64 amd_perfmon_event_map[] =
726 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
727 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
728 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
729 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
730 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
731 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
734 static u64 amd_pmu_event_map(int hw_event)
736 return amd_perfmon_event_map[hw_event];
739 static u64 amd_pmu_raw_event(u64 hw_event)
741 #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
742 #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
743 #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
744 #define K7_EVNTSEL_INV_MASK 0x000800000ULL
745 #define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
747 #define K7_EVNTSEL_MASK \
748 (K7_EVNTSEL_EVENT_MASK | \
749 K7_EVNTSEL_UNIT_MASK | \
750 K7_EVNTSEL_EDGE_MASK | \
751 K7_EVNTSEL_INV_MASK | \
752 K7_EVNTSEL_REG_MASK)
754 return hw_event & K7_EVNTSEL_MASK;
758 * Propagate event elapsed time into the generic event.
759 * Can only be executed on the CPU where the event is active.
760 * Returns the delta events processed.
763 x86_perf_event_update(struct perf_event *event,
764 struct hw_perf_event *hwc, int idx)
766 int shift = 64 - x86_pmu.event_bits;
767 u64 prev_raw_count, new_raw_count;
770 if (idx == X86_PMC_IDX_FIXED_BTS)
774 * Careful: an NMI might modify the previous event value.
776 * Our tactic to handle this is to first atomically read and
777 * exchange a new raw count - then add that new-prev delta
778 * count to the generic event atomically:
781 prev_raw_count = atomic64_read(&hwc->prev_count);
782 rdmsrl(hwc->event_base + idx, new_raw_count);
784 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
785 new_raw_count) != prev_raw_count)
789 * Now we have the new raw value and have updated the prev
790 * timestamp already. We can now calculate the elapsed delta
791 * (event-)time and add that to the generic event.
793 * Careful, not all hw sign-extends above the physical width of the count.
796 delta = (new_raw_count << shift) - (prev_raw_count << shift);
797 delta >>= shift;
799 atomic64_add(delta, &event->count);
800 atomic64_sub(delta, &hwc->period_left);
802 return new_raw_count;
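/*
 * Illustrative sketch (helper made up for this comment, not part of the
 * original file): the shift dance above computes the delta modulo
 * 2^event_bits, so a counter that wrapped around in its implemented
 * width still yields the correct positive difference.  With 48-bit AMD
 * counters, for example, shift is 16.
 */
static inline s64 example_counter_delta(u64 prev, u64 cur, int event_bits)
{
	int shift = 64 - event_bits;
	s64 delta;

	/* bits the hardware may have left above event_bits drop out here */
	delta = (cur << shift) - (prev << shift);
	delta >>= shift;

	return delta;
}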
805 static atomic_t active_events;
806 static DEFINE_MUTEX(pmc_reserve_mutex);
808 static bool reserve_pmc_hardware(void)
810 #ifdef CONFIG_X86_LOCAL_APIC
813 if (nmi_watchdog == NMI_LOCAL_APIC)
814 disable_lapic_nmi_watchdog();
816 for (i = 0; i < x86_pmu.num_events; i++) {
817 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
821 for (i = 0; i < x86_pmu.num_events; i++) {
822 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
829 #ifdef CONFIG_X86_LOCAL_APIC
831 for (i--; i >= 0; i--)
832 release_evntsel_nmi(x86_pmu.eventsel + i);
834 i = x86_pmu.num_events;
837 for (i--; i >= 0; i--)
838 release_perfctr_nmi(x86_pmu.perfctr + i);
840 if (nmi_watchdog == NMI_LOCAL_APIC)
841 enable_lapic_nmi_watchdog();
847 static void release_pmc_hardware(void)
849 #ifdef CONFIG_X86_LOCAL_APIC
852 for (i = 0; i < x86_pmu.num_events; i++) {
853 release_perfctr_nmi(x86_pmu.perfctr + i);
854 release_evntsel_nmi(x86_pmu.eventsel + i);
857 if (nmi_watchdog == NMI_LOCAL_APIC)
858 enable_lapic_nmi_watchdog();
862 static inline bool bts_available(void)
864 return x86_pmu.enable_bts != NULL;
867 static inline void init_debug_store_on_cpu(int cpu)
869 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
874 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
875 (u32)((u64)(unsigned long)ds),
876 (u32)((u64)(unsigned long)ds >> 32));
879 static inline void fini_debug_store_on_cpu(int cpu)
881 if (!per_cpu(cpu_hw_events, cpu).ds)
884 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
887 static void release_bts_hardware(void)
891 if (!bts_available())
896 for_each_online_cpu(cpu)
897 fini_debug_store_on_cpu(cpu);
899 for_each_possible_cpu(cpu) {
900 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
905 per_cpu(cpu_hw_events, cpu).ds = NULL;
907 kfree((void *)(unsigned long)ds->bts_buffer_base);
914 static int reserve_bts_hardware(void)
918 if (!bts_available())
923 for_each_possible_cpu(cpu) {
924 struct debug_store *ds;
928 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
929 if (unlikely(!buffer))
932 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
938 ds->bts_buffer_base = (u64)(unsigned long)buffer;
939 ds->bts_index = ds->bts_buffer_base;
940 ds->bts_absolute_maximum =
941 ds->bts_buffer_base + BTS_BUFFER_SIZE;
942 ds->bts_interrupt_threshold =
943 ds->bts_absolute_maximum - BTS_OVFL_TH;
945 per_cpu(cpu_hw_events, cpu).ds = ds;
950 release_bts_hardware();
952 for_each_online_cpu(cpu)
953 init_debug_store_on_cpu(cpu);
961 static void hw_perf_event_destroy(struct perf_event *event)
963 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
964 release_pmc_hardware();
965 release_bts_hardware();
966 mutex_unlock(&pmc_reserve_mutex);
970 static inline int x86_pmu_initialized(void)
972 return x86_pmu.handle_irq != NULL;
976 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
978 unsigned int cache_type, cache_op, cache_result;
981 config = attr->config;
983 cache_type = (config >> 0) & 0xff;
984 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
987 cache_op = (config >> 8) & 0xff;
988 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
991 cache_result = (config >> 16) & 0xff;
992 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
995 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
1008 static void intel_pmu_enable_bts(u64 config)
1010 unsigned long debugctlmsr;
1012 debugctlmsr = get_debugctlmsr();
1014 debugctlmsr |= X86_DEBUGCTL_TR;
1015 debugctlmsr |= X86_DEBUGCTL_BTS;
1016 debugctlmsr |= X86_DEBUGCTL_BTINT;
1018 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
1019 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
1021 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
1022 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
1024 update_debugctlmsr(debugctlmsr);
1027 static void intel_pmu_disable_bts(void)
1029 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1030 unsigned long debugctlmsr;
1035 debugctlmsr = get_debugctlmsr();
1037 debugctlmsr &=
1038 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
1039 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
1041 update_debugctlmsr(debugctlmsr);
1045 * Setup the hardware configuration for a given attr_type
1047 static int __hw_perf_event_init(struct perf_event *event)
1049 struct perf_event_attr *attr = &event->attr;
1050 struct hw_perf_event *hwc = &event->hw;
1054 if (!x86_pmu_initialized())
1058 if (!atomic_inc_not_zero(&active_events)) {
1059 mutex_lock(&pmc_reserve_mutex);
1060 if (atomic_read(&active_events) == 0) {
1061 if (!reserve_pmc_hardware())
1064 err = reserve_bts_hardware();
1067 atomic_inc(&active_events);
1068 mutex_unlock(&pmc_reserve_mutex);
1073 event->destroy = hw_perf_event_destroy;
1076 * Generate PMC IRQs:
1077 * (keep 'enabled' bit clear for now)
1079 hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1084 * Count user and OS events unless requested not to.
1086 if (!attr->exclude_user)
1087 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1088 if (!attr->exclude_kernel)
1089 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1091 if (!hwc->sample_period) {
1092 hwc->sample_period = x86_pmu.max_period;
1093 hwc->last_period = hwc->sample_period;
1094 atomic64_set(&hwc->period_left, hwc->sample_period);
1097 * If we have a PMU initialized but no APIC
1098 * interrupts, we cannot sample hardware
1099 * events (user-space has to fall back and
1100 * sample via a hrtimer based software event):
1107 * Raw hw_event type provides the config in the hw_event structure
1109 if (attr->type == PERF_TYPE_RAW) {
1110 hwc->config |= x86_pmu.raw_event(attr->config);
1114 if (attr->type == PERF_TYPE_HW_CACHE)
1115 return set_ext_hw_attr(hwc, attr);
1117 if (attr->config >= x86_pmu.max_events)
1123 config = x86_pmu.event_map(attr->config);
1134 if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1135 (hwc->sample_period == 1)) {
1136 /* BTS is not supported by this architecture. */
1137 if (!bts_available())
1140 /* BTS is currently only allowed for user-mode. */
1141 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1145 hwc->config |= config;
1150 static void p6_pmu_disable_all(void)
1152 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1161 /* p6 only has one enable register */
1162 rdmsrl(MSR_P6_EVNTSEL0, val);
1163 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1164 wrmsrl(MSR_P6_EVNTSEL0, val);
1167 static void intel_pmu_disable_all(void)
1169 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1177 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1179 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1180 intel_pmu_disable_bts();
1183 static void amd_pmu_disable_all(void)
1185 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1193 * ensure we write the disable before we start disabling the
1194 * events proper, so that amd_pmu_enable_event() does the right thing.
1199 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1202 if (!test_bit(idx, cpuc->active_mask))
1204 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1205 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1207 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1208 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1212 void hw_perf_disable(void)
1214 if (!x86_pmu_initialized())
1216 return x86_pmu.disable_all();
1219 static void p6_pmu_enable_all(void)
1221 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1230 /* p6 only has one enable register */
1231 rdmsrl(MSR_P6_EVNTSEL0, val);
1232 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1233 wrmsrl(MSR_P6_EVNTSEL0, val);
1236 static void intel_pmu_enable_all(void)
1238 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1246 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1248 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1249 struct perf_event *event =
1250 cpuc->events[X86_PMC_IDX_FIXED_BTS];
1252 if (WARN_ON_ONCE(!event))
1255 intel_pmu_enable_bts(event->hw.config);
1259 static void amd_pmu_enable_all(void)
1261 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1270 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1271 struct perf_event *event = cpuc->events[idx];
1274 if (!test_bit(idx, cpuc->active_mask))
1277 val = event->hw.config;
1278 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1279 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1283 void hw_perf_enable(void)
1285 if (!x86_pmu_initialized())
1287 x86_pmu.enable_all();
1290 static inline u64 intel_pmu_get_status(void)
1294 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1299 static inline void intel_pmu_ack_status(u64 ack)
1301 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1304 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1306 (void)checking_wrmsrl(hwc->config_base + idx,
1307 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1310 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1312 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1316 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1318 int idx = __idx - X86_PMC_IDX_FIXED;
1321 mask = 0xfULL << (idx * 4);
1323 rdmsrl(hwc->config_base, ctrl_val);
1325 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1329 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1331 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1332 u64 val = P6_NOP_EVENT;
1335 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1337 (void)checking_wrmsrl(hwc->config_base + idx, val);
1341 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1343 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1344 intel_pmu_disable_bts();
1348 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1349 intel_pmu_disable_fixed(hwc, idx);
1353 x86_pmu_disable_event(hwc, idx);
1357 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1359 x86_pmu_disable_event(hwc, idx);
1362 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1365 * Set the next IRQ period, based on the hwc->period_left value.
1366 * To be called with the event disabled in hw:
1369 x86_perf_event_set_period(struct perf_event *event,
1370 struct hw_perf_event *hwc, int idx)
1372 s64 left = atomic64_read(&hwc->period_left);
1373 s64 period = hwc->sample_period;
1376 if (idx == X86_PMC_IDX_FIXED_BTS)
1380 * If we are way outside a reasonable range then just skip forward:
1382 if (unlikely(left <= -period)) {
1384 atomic64_set(&hwc->period_left, left);
1385 hwc->last_period = period;
1389 if (unlikely(left <= 0)) {
1391 atomic64_set(&hwc->period_left, left);
1392 hwc->last_period = period;
1396 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1398 if (unlikely(left < 2))
1401 if (left > x86_pmu.max_period)
1402 left = x86_pmu.max_period;
1404 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1407 * The hw event starts counting from this event offset,
1408 * mark it to be able to extract future deltas:
1410 atomic64_set(&hwc->prev_count, (u64)-left);
1412 err = checking_wrmsrl(hwc->event_base + idx,
1413 (u64)(-left) & x86_pmu.event_mask);
1415 perf_event_update_userpage(event);
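/*
 * Illustrative sketch (helper name invented here, not part of the
 * original file): the counter is programmed with the two's complement
 * of the remaining period, so it overflows -- and raises the PMI --
 * after exactly 'left' more events.  E.g. for left = 100000 on a
 * 48-bit counter the value written is (0 - 100000) & ((1ULL << 48) - 1).
 */
static inline u64 example_pmc_start_value(s64 left, u64 event_mask)
{
	return (u64)(-left) & event_mask;
}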
1421 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1423 int idx = __idx - X86_PMC_IDX_FIXED;
1424 u64 ctrl_val, bits, mask;
1428 * Enable IRQ generation (0x8),
1429 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1433 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1435 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1439 * ANY bit is supported in v3 and up
1441 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1445 mask = 0xfULL << (idx * 4);
1447 rdmsrl(hwc->config_base, ctrl_val);
1450 err = checking_wrmsrl(hwc->config_base, ctrl_val);
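/*
 * Illustrative note (not original code): MSR_ARCH_PERFMON_FIXED_CTR_CTRL
 * holds one 4-bit field per fixed counter, hence the 0xf mask shifted
 * by (idx * 4) above.  Within a field, bit 0 enables ring-0 counting,
 * bit 1 ring-3 counting and bit 3 PMI generation, matching the
 * 0x1/0x2/0x8 values used by intel_pmu_enable_fixed().
 */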
1453 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1455 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1460 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1462 (void)checking_wrmsrl(hwc->config_base + idx, val);
1466 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1468 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1469 if (!__get_cpu_var(cpu_hw_events).enabled)
1472 intel_pmu_enable_bts(hwc->config);
1476 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1477 intel_pmu_enable_fixed(hwc, idx);
1481 x86_pmu_enable_event(hwc, idx);
1484 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1486 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1489 x86_pmu_enable_event(hwc, idx);
1492 static int fixed_mode_idx(struct hw_perf_event *hwc)
1494 unsigned int hw_event;
1496 hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
1498 if (unlikely((hw_event ==
1499 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
1500 (hwc->sample_period == 1)))
1501 return X86_PMC_IDX_FIXED_BTS;
1503 if (!x86_pmu.num_events_fixed)
1507 * fixed counters do not take all possible filters
1509 if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
1512 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
1513 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
1514 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
1515 return X86_PMC_IDX_FIXED_CPU_CYCLES;
1516 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
1517 return X86_PMC_IDX_FIXED_BUS_CYCLES;
1523 * generic counter allocator: get next free counter
1526 gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
1530 idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
1531 return idx == x86_pmu.num_events ? -1 : idx;
1535 * intel-specific counter allocator: check event constraints
1538 intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
1540 const struct event_constraint *event_constraint;
1543 if (!event_constraints)
1546 code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
1548 for_each_event_constraint(event_constraint, event_constraints) {
1549 if (code == event_constraint->code) {
1550 for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
1551 if (!test_and_set_bit(i, cpuc->used_mask))
1558 return gen_get_event_idx(cpuc, hwc);
1562 x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
1566 idx = fixed_mode_idx(hwc);
1567 if (idx == X86_PMC_IDX_FIXED_BTS) {
1568 /* BTS is already occupied. */
1569 if (test_and_set_bit(idx, cpuc->used_mask))
1572 hwc->config_base = 0;
1573 hwc->event_base = 0;
1575 } else if (idx >= 0) {
1577 * Try to get the fixed event, if that is already taken
1578 * then try to get a generic event:
1580 if (test_and_set_bit(idx, cpuc->used_mask))
1583 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1585 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1586 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1589 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1593 /* Try to get the previous generic event again */
1594 if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
1596 idx = x86_pmu.get_event_idx(cpuc, hwc);
1600 set_bit(idx, cpuc->used_mask);
1603 hwc->config_base = x86_pmu.eventsel;
1604 hwc->event_base = x86_pmu.perfctr;
1611 * Find a PMC slot for the freshly enabled / scheduled in event:
1613 static int x86_pmu_enable(struct perf_event *event)
1615 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1616 struct hw_perf_event *hwc = &event->hw;
1619 idx = x86_schedule_event(cpuc, hwc);
1623 perf_events_lapic_init();
1625 x86_pmu.disable(hwc, idx);
1627 cpuc->events[idx] = event;
1628 set_bit(idx, cpuc->active_mask);
1630 x86_perf_event_set_period(event, hwc, idx);
1631 x86_pmu.enable(hwc, idx);
1633 perf_event_update_userpage(event);
1638 static void x86_pmu_unthrottle(struct perf_event *event)
1640 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1641 struct hw_perf_event *hwc = &event->hw;
1643 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1644 cpuc->events[hwc->idx] != event))
1647 x86_pmu.enable(hwc, hwc->idx);
1650 void perf_event_print_debug(void)
1652 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1653 struct cpu_hw_events *cpuc;
1654 unsigned long flags;
1657 if (!x86_pmu.num_events)
1660 local_irq_save(flags);
1662 cpu = smp_processor_id();
1663 cpuc = &per_cpu(cpu_hw_events, cpu);
1665 if (x86_pmu.version >= 2) {
1666 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1667 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1668 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1669 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1672 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1673 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1674 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1675 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
1677 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
1679 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1680 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1681 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1683 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1685 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
1686 cpu, idx, pmc_ctrl);
1687 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
1688 cpu, idx, pmc_count);
1689 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
1690 cpu, idx, prev_left);
1692 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1693 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1695 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1696 cpu, idx, pmc_count);
1698 local_irq_restore(flags);
1701 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1703 struct debug_store *ds = cpuc->ds;
1709 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1710 struct bts_record *at, *top;
1711 struct perf_output_handle handle;
1712 struct perf_event_header header;
1713 struct perf_sample_data data;
1714 struct pt_regs regs;
1722 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1723 top = (struct bts_record *)(unsigned long)ds->bts_index;
1728 ds->bts_index = ds->bts_buffer_base;
1730 perf_sample_data_init(&data, 0);
1732 data.period = event->hw.last_period;
1736 * Prepare a generic sample, i.e. fill in the invariant fields.
1737 * We will overwrite the from and to address before we output the sample.
1740 perf_prepare_sample(&header, &data, event, &regs);
1742 if (perf_output_begin(&handle, event,
1743 header.size * (top - at), 1, 1))
1746 for (; at < top; at++) {
1750 perf_output_sample(&handle, &header, &data, event);
1753 perf_output_end(&handle);
1755 /* There's new data available. */
1756 event->hw.interrupts++;
1757 event->pending_kill = POLL_IN;
1760 static void x86_pmu_disable(struct perf_event *event)
1762 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1763 struct hw_perf_event *hwc = &event->hw;
1767 * Must be done before we disable, otherwise the nmi handler
1768 * could reenable again:
1770 clear_bit(idx, cpuc->active_mask);
1771 x86_pmu.disable(hwc, idx);
1774 * Make sure the cleared pointer becomes visible before we
1775 * (potentially) free the event:
1780 * Drain the remaining delta count out of an event
1781 * that we are disabling:
1783 x86_perf_event_update(event, hwc, idx);
1785 /* Drain the remaining BTS records. */
1786 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1787 intel_pmu_drain_bts_buffer(cpuc);
1789 cpuc->events[idx] = NULL;
1790 clear_bit(idx, cpuc->used_mask);
1792 perf_event_update_userpage(event);
1796 * Save and restart an expired event. Called by NMI contexts,
1797 * so it has to be careful about preempting normal event ops:
1799 static int intel_pmu_save_and_restart(struct perf_event *event)
1801 struct hw_perf_event *hwc = &event->hw;
1805 x86_perf_event_update(event, hwc, idx);
1806 ret = x86_perf_event_set_period(event, hwc, idx);
1808 if (event->state == PERF_EVENT_STATE_ACTIVE)
1809 intel_pmu_enable_event(hwc, idx);
1814 static void intel_pmu_reset(void)
1816 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1817 unsigned long flags;
1820 if (!x86_pmu.num_events)
1823 local_irq_save(flags);
1825 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1827 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1828 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1829 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1831 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1832 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1835 ds->bts_index = ds->bts_buffer_base;
1837 local_irq_restore(flags);
1840 static int p6_pmu_handle_irq(struct pt_regs *regs)
1842 struct perf_sample_data data;
1843 struct cpu_hw_events *cpuc;
1844 struct perf_event *event;
1845 struct hw_perf_event *hwc;
1846 int idx, handled = 0;
1849 perf_sample_data_init(&data, 0);
1851 cpuc = &__get_cpu_var(cpu_hw_events);
1853 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1854 if (!test_bit(idx, cpuc->active_mask))
1857 event = cpuc->events[idx];
1860 val = x86_perf_event_update(event, hwc, idx);
1861 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1868 data.period = event->hw.last_period;
1870 if (!x86_perf_event_set_period(event, hwc, idx))
1873 if (perf_event_overflow(event, 1, &data, regs))
1874 p6_pmu_disable_event(hwc, idx);
1878 inc_irq_stat(apic_perf_irqs);
1884 * This handler is triggered by the local APIC, so the APIC IRQ handling rules apply:
1887 static int intel_pmu_handle_irq(struct pt_regs *regs)
1889 struct perf_sample_data data;
1890 struct cpu_hw_events *cpuc;
1894 perf_sample_data_init(&data, 0);
1896 cpuc = &__get_cpu_var(cpu_hw_events);
1899 intel_pmu_drain_bts_buffer(cpuc);
1900 status = intel_pmu_get_status();
1908 if (++loops > 100) {
1909 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1910 perf_event_print_debug();
1916 inc_irq_stat(apic_perf_irqs);
1918 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1919 struct perf_event *event = cpuc->events[bit];
1921 clear_bit(bit, (unsigned long *) &status);
1922 if (!test_bit(bit, cpuc->active_mask))
1925 if (!intel_pmu_save_and_restart(event))
1928 data.period = event->hw.last_period;
1930 if (perf_event_overflow(event, 1, &data, regs))
1931 intel_pmu_disable_event(&event->hw, bit);
1934 intel_pmu_ack_status(ack);
1937 * Repeat if there is more work to be done:
1939 status = intel_pmu_get_status();
1948 static int amd_pmu_handle_irq(struct pt_regs *regs)
1950 struct perf_sample_data data;
1951 struct cpu_hw_events *cpuc;
1952 struct perf_event *event;
1953 struct hw_perf_event *hwc;
1954 int idx, handled = 0;
1960 cpuc = &__get_cpu_var(cpu_hw_events);
1962 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1963 if (!test_bit(idx, cpuc->active_mask))
1966 event = cpuc->events[idx];
1969 val = x86_perf_event_update(event, hwc, idx);
1970 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1977 data.period = event->hw.last_period;
1979 if (!x86_perf_event_set_period(event, hwc, idx))
1982 if (perf_event_overflow(event, 1, &data, regs))
1983 amd_pmu_disable_event(hwc, idx);
1987 inc_irq_stat(apic_perf_irqs);
1992 void smp_perf_pending_interrupt(struct pt_regs *regs)
1996 inc_irq_stat(apic_pending_irqs);
1997 perf_event_do_pending();
2001 void set_perf_event_pending(void)
2003 #ifdef CONFIG_X86_LOCAL_APIC
2004 if (!x86_pmu.apic || !x86_pmu_initialized())
2007 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2011 void perf_events_lapic_init(void)
2013 #ifdef CONFIG_X86_LOCAL_APIC
2014 if (!x86_pmu.apic || !x86_pmu_initialized())
2018 * Always use NMI for PMU
2020 apic_write(APIC_LVTPC, APIC_DM_NMI);
2024 static int __kprobes
2025 perf_event_nmi_handler(struct notifier_block *self,
2026 unsigned long cmd, void *__args)
2028 struct die_args *args = __args;
2029 struct pt_regs *regs;
2031 if (!atomic_read(&active_events))
2045 #ifdef CONFIG_X86_LOCAL_APIC
2046 apic_write(APIC_LVTPC, APIC_DM_NMI);
2049 * Can't rely on the handled return value to say it was our NMI, two
2050 * events could trigger 'simultaneously' raising two back-to-back NMIs.
2052 * If the first NMI handles both, the latter will be empty and daze the CPU.
2055 x86_pmu.handle_irq(regs);
2060 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2061 .notifier_call = perf_event_nmi_handler,
2066 static __initconst struct x86_pmu p6_pmu = {
2068 .handle_irq = p6_pmu_handle_irq,
2069 .disable_all = p6_pmu_disable_all,
2070 .enable_all = p6_pmu_enable_all,
2071 .enable = p6_pmu_enable_event,
2072 .disable = p6_pmu_disable_event,
2073 .eventsel = MSR_P6_EVNTSEL0,
2074 .perfctr = MSR_P6_PERFCTR0,
2075 .event_map = p6_pmu_event_map,
2076 .raw_event = p6_pmu_raw_event,
2077 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
2079 .max_period = (1ULL << 31) - 1,
2083 * Events have 40 bits implemented. However they are designed such
2084 * that bits [32-39] are sign extensions of bit 31. As such the
2085 * effective width of an event for P6-like PMU is 32 bits only.
2087 * See IA-32 Intel Architecture Software developer manual Vol 3B
2090 .event_mask = (1ULL << 32) - 1,
2091 .get_event_idx = intel_get_event_idx,
2094 static __initconst struct x86_pmu intel_pmu = {
2096 .handle_irq = intel_pmu_handle_irq,
2097 .disable_all = intel_pmu_disable_all,
2098 .enable_all = intel_pmu_enable_all,
2099 .enable = intel_pmu_enable_event,
2100 .disable = intel_pmu_disable_event,
2101 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2102 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2103 .event_map = intel_pmu_event_map,
2104 .raw_event = intel_pmu_raw_event,
2105 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2108 * Intel PMCs cannot be accessed sanely above 32 bit width,
2109 * so we install an artificial 1<<31 period regardless of
2110 * the generic event period:
2112 .max_period = (1ULL << 31) - 1,
2113 .enable_bts = intel_pmu_enable_bts,
2114 .disable_bts = intel_pmu_disable_bts,
2115 .get_event_idx = intel_get_event_idx,
2118 static __initconst struct x86_pmu amd_pmu = {
2120 .handle_irq = amd_pmu_handle_irq,
2121 .disable_all = amd_pmu_disable_all,
2122 .enable_all = amd_pmu_enable_all,
2123 .enable = amd_pmu_enable_event,
2124 .disable = amd_pmu_disable_event,
2125 .eventsel = MSR_K7_EVNTSEL0,
2126 .perfctr = MSR_K7_PERFCTR0,
2127 .event_map = amd_pmu_event_map,
2128 .raw_event = amd_pmu_raw_event,
2129 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
2132 .event_mask = (1ULL << 48) - 1,
2134 /* use highest bit to detect overflow */
2135 .max_period = (1ULL << 47) - 1,
2136 .get_event_idx = gen_get_event_idx,
2139 static __init int p6_pmu_init(void)
2141 switch (boot_cpu_data.x86_model) {
2143 case 3: /* Pentium Pro */
2145 case 6: /* Pentium II */
2148 case 11: /* Pentium III */
2149 event_constraints = intel_p6_event_constraints;
2154 event_constraints = intel_p6_event_constraints;
2157 pr_cont("unsupported p6 CPU model %d ",
2158 boot_cpu_data.x86_model);
2167 static __init int intel_pmu_init(void)
2169 union cpuid10_edx edx;
2170 union cpuid10_eax eax;
2171 unsigned int unused;
2175 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2176 /* check for P6 processor family */
2177 if (boot_cpu_data.x86 == 6) {
2178 return p6_pmu_init();
2185 * Check whether the Architectural PerfMon supports
2186 * Branch Misses Retired hw_event or not.
2188 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2189 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2192 version = eax.split.version_id;
2196 x86_pmu = intel_pmu;
2197 x86_pmu.version = version;
2198 x86_pmu.num_events = eax.split.num_events;
2199 x86_pmu.event_bits = eax.split.bit_width;
2200 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
2203 * Quirk: v2 perfmon does not report fixed-purpose events, so
2204 * assume at least 3 events:
2206 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
2209 * Install the hw-cache-events table:
2211 switch (boot_cpu_data.x86_model) {
2213 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2214 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2215 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2216 case 29: /* six-core 45 nm xeon "Dunnington" */
2217 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2218 sizeof(hw_cache_event_ids));
2220 pr_cont("Core2 events, ");
2221 event_constraints = intel_core_event_constraints;
2224 case 26: /* 45 nm nehalem, "Bloomfield" */
2225 case 30: /* 45 nm nehalem, "Lynnfield" */
2226 case 46: /* 45 nm nehalem-ex, "Beckton" */
2227 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2228 sizeof(hw_cache_event_ids));
2230 event_constraints = intel_nehalem_event_constraints;
2231 pr_cont("Nehalem/Corei7 events, ");
2234 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2235 sizeof(hw_cache_event_ids));
2237 pr_cont("Atom events, ");
2240 case 37: /* 32 nm nehalem, "Clarkdale" */
2241 case 44: /* 32 nm nehalem, "Gulftown" */
2242 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2243 sizeof(hw_cache_event_ids));
2245 pr_cont("Westmere events, ");
2251 static __init int amd_pmu_init(void)
2253 /* Performance-monitoring supported from K7 and later: */
2254 if (boot_cpu_data.x86 < 6)
2259 /* Events are common for all AMDs */
2260 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2261 sizeof(hw_cache_event_ids));
2266 static void __init pmu_check_apic(void)
2272 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2273 pr_info("no hardware sampling interrupt available.\n");
2276 void __init init_hw_perf_events(void)
2280 pr_info("Performance Events: ");
2282 switch (boot_cpu_data.x86_vendor) {
2283 case X86_VENDOR_INTEL:
2284 err = intel_pmu_init();
2286 case X86_VENDOR_AMD:
2287 err = amd_pmu_init();
2293 pr_cont("no PMU driver, software events only.\n");
2299 pr_cont("%s PMU driver.\n", x86_pmu.name);
2301 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2302 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2303 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2304 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2306 perf_event_mask = (1 << x86_pmu.num_events) - 1;
2307 perf_max_events = x86_pmu.num_events;
2309 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2310 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2311 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2312 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2316 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2317 x86_pmu.intel_ctrl = perf_event_mask;
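/*
 * Worked example (illustrative, not original text): with 4 generic and
 * 3 fixed counters the mask built above is
 * ((1 << 4) - 1) | (((1LL << 3) - 1) << 32), i.e. 0x70000000f --
 * generic counters in the low bits, fixed counters starting at bit
 * X86_PMC_IDX_FIXED (32).
 */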
2319 perf_events_lapic_init();
2320 register_die_notifier(&perf_event_nmi_notifier);
2322 pr_info("... version: %d\n", x86_pmu.version);
2323 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2324 pr_info("... generic registers: %d\n", x86_pmu.num_events);
2325 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2326 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2327 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2328 pr_info("... event mask: %016Lx\n", perf_event_mask);
2331 static inline void x86_pmu_read(struct perf_event *event)
2333 x86_perf_event_update(event, &event->hw, event->hw.idx);
2336 static const struct pmu pmu = {
2337 .enable = x86_pmu_enable,
2338 .disable = x86_pmu_disable,
2339 .read = x86_pmu_read,
2340 .unthrottle = x86_pmu_unthrottle,
2344 validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
2346 struct hw_perf_event fake_event = event->hw;
2348 if (event->pmu && event->pmu != &pmu)
2351 return x86_schedule_event(cpuc, &fake_event) >= 0;
2354 static int validate_group(struct perf_event *event)
2356 struct perf_event *sibling, *leader = event->group_leader;
2357 struct cpu_hw_events fake_pmu;
2359 memset(&fake_pmu, 0, sizeof(fake_pmu));
2361 if (!validate_event(&fake_pmu, leader))
2364 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
2365 if (!validate_event(&fake_pmu, sibling))
2369 if (!validate_event(&fake_pmu, event))
2375 const struct pmu *hw_perf_event_init(struct perf_event *event)
2379 err = __hw_perf_event_init(event);
2381 if (event->group_leader != event)
2382 err = validate_group(event);
2386 event->destroy(event);
2387 return ERR_PTR(err);
2398 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2400 if (entry->nr < PERF_MAX_STACK_DEPTH)
2401 entry->ip[entry->nr++] = ip;
2404 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2405 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2406 static DEFINE_PER_CPU(int, in_ignored_frame);
2410 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2412 /* Ignore warnings */
2415 static void backtrace_warning(void *data, char *msg)
2417 /* Ignore warnings */
2420 static int backtrace_stack(void *data, char *name)
2422 per_cpu(in_ignored_frame, smp_processor_id()) =
2423 x86_is_stack_id(NMI_STACK, name) ||
2424 x86_is_stack_id(DEBUG_STACK, name);
2429 static void backtrace_address(void *data, unsigned long addr, int reliable)
2431 struct perf_callchain_entry *entry = data;
2433 if (per_cpu(in_ignored_frame, smp_processor_id()))
2437 callchain_store(entry, addr);
2440 static const struct stacktrace_ops backtrace_ops = {
2441 .warning = backtrace_warning,
2442 .warning_symbol = backtrace_warning_symbol,
2443 .stack = backtrace_stack,
2444 .address = backtrace_address,
2445 .walk_stack = print_context_stack_bp,
2448 #include "../dumpstack.h"
2451 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2453 callchain_store(entry, PERF_CONTEXT_KERNEL);
2454 callchain_store(entry, regs->ip);
2456 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2460 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2462 static unsigned long
2463 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2465 unsigned long offset, addr = (unsigned long)from;
2466 int type = in_nmi() ? KM_NMI : KM_IRQ0;
2467 unsigned long size, len = 0;
2473 ret = __get_user_pages_fast(addr, 1, 0, &page);
2477 offset = addr & (PAGE_SIZE - 1);
2478 size = min(PAGE_SIZE - offset, n - len);
2480 map = kmap_atomic(page, type);
2481 memcpy(to, map+offset, size);
2482 kunmap_atomic(map, type);
2494 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2496 unsigned long bytes;
2498 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2500 return bytes == sizeof(*frame);
2504 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2506 struct stack_frame frame;
2507 const void __user *fp;
2509 if (!user_mode(regs))
2510 regs = task_pt_regs(current);
2512 fp = (void __user *)regs->bp;
2514 callchain_store(entry, PERF_CONTEXT_USER);
2515 callchain_store(entry, regs->ip);
2517 while (entry->nr < PERF_MAX_STACK_DEPTH) {
2518 frame.next_frame = NULL;
2519 frame.return_address = 0;
2521 if (!copy_stack_frame(fp, &frame))
2524 if ((unsigned long)fp < regs->sp)
2527 callchain_store(entry, frame.return_address);
2528 fp = frame.next_frame;
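/*
 * Illustrative note (assumption): struct stack_frame mirrors the
 * saved-frame-pointer layout the compiler emits -- a pointer to the
 * caller's frame followed by the return address -- so the loop above
 * simply chases the %rbp/%ebp links copied from user space one frame
 * at a time via copy_stack_frame().
 */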
2533 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2540 is_user = user_mode(regs);
2542 if (!current || current->pid == 0)
2545 if (is_user && current->state != TASK_RUNNING)
2549 perf_callchain_kernel(regs, entry);
2552 perf_callchain_user(regs, entry);
2555 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2557 struct perf_callchain_entry *entry;
2560 entry = &__get_cpu_var(pmc_nmi_entry);
2562 entry = &__get_cpu_var(pmc_irq_entry);
2566 perf_do_callchain(regs, entry);
2571 void hw_perf_event_setup_online(int cpu)
2573 init_debug_store_on_cpu(cpu);