 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *  For licensing details see kernel-base/COPYING
#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <asm/stacktrace.h>
#include <asm/alternative.h>
#include <asm/timer.h>
#include "perf_event.h"
struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {

u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
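
/*
 * Both tables are indexed as [type][op][result], e.g.
 * hw_cache_event_ids[PERF_COUNT_HW_CACHE_L1D][PERF_COUNT_HW_CACHE_OP_READ]
 * [PERF_COUNT_HW_CACHE_RESULT_MISS]. By convention a 0 entry means the
 * combination is not supported and -1 means it is invalid (see
 * set_ext_hw_attr() below).
 */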
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
u64 x86_perf_event_update(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - x86_pmu.cntval_bits;
        u64 prev_raw_count, new_raw_count;
        if (idx == INTEL_PMC_IDX_FIXED_BTS)
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdpmcl(hwc->event_base_rdpmc, new_raw_count);
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
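         * of the count: e.g. with 48-bit counters, shift is 16. Shifting
         * both raw counts up by 16 discards the stale high bits so the
         * subtraction wraps correctly, and shifting the delta back down
         * by the same amount recovers the true unsigned delta.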
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
        return new_raw_count;
 * Find and validate any extra registers to set up.
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
        struct hw_perf_event_extra *reg;
        struct extra_reg *er;
        reg = &event->hw.extra_reg;
        if (!x86_pmu.extra_regs)
        for (er = x86_pmu.extra_regs; er->msr; er++) {
                if (er->event != (config & er->config_mask))
                if (event->attr.config1 & ~er->valid_mask)
                reg->config = event->attr.config1;

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu_config_addr(i));
        i = x86_pmu.num_counters;
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu_event_addr(i));

static void release_pmc_hardware(void)
        for (i = 0; i < x86_pmu.num_counters; i++) {
                release_perfctr_nmi(x86_pmu_event_addr(i));
                release_evntsel_nmi(x86_pmu_config_addr(i));

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}
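
/*
 * reserve_perfctr_nmi()/reserve_evntsel_nmi() arbitrate the counter and
 * eventsel MSRs with other NMI users such as oprofile and the NMI
 * watchdog, which is why reserve_pmc_hardware() claims every pair and
 * rolls the reservations back on failure. Without a local APIC there is
 * nothing to arbitrate, hence the no-op stubs.
 */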
static bool check_hw_exists(void)
        u64 val, val_new = ~0;
         * Check to see if the BIOS enabled any of the counters, if so
        for (i = 0; i < x86_pmu.num_counters; i++) {
                reg = x86_pmu_config_addr(i);
                ret = rdmsrl_safe(reg, &val);
                if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
        if (x86_pmu.num_counters_fixed) {
                reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                ret = rdmsrl_safe(reg, &val);
                for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
                        if (val & (0x03 << i*4))
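                        /*
                         * MSR_ARCH_PERFMON_FIXED_CTR_CTRL has a 4-bit
                         * control field per fixed counter; bits 0-1 are
                         * the OS/USR enable bits, hence the 0x03 mask.
                         */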
         * Read the current value, change it and read it back to see if it
         * matches, this is needed to detect certain hardware emulators
         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
        reg = x86_pmu_event_addr(0);
        if (rdmsrl_safe(reg, &val))
        ret = wrmsrl_safe(reg, val);
        ret |= rdmsrl_safe(reg, &val_new);
        if (ret || val != val_new)
         * We still allow the PMU driver to operate:
        printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
        printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
        printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
        printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
static void hw_perf_event_destroy(struct perf_event *event)
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                release_ds_buffers();
                mutex_unlock(&pmc_reserve_mutex);

static inline int x86_pmu_initialized(void)
        return x86_pmu.handle_irq != NULL;

set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
        struct perf_event_attr *attr = &event->attr;
        unsigned int cache_type, cache_op, cache_result;
        config = attr->config;
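        /*
         * The cache event config is packed as: bits 0-7 cache type,
         * bits 8-15 operation, bits 16-23 result, decoded below.
         */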
        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
        val = hw_cache_event_ids[cache_type][cache_op][cache_result];
        attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
        return x86_pmu_extra_regs(val, event);
int x86_setup_perfctr(struct perf_event *event)
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        if (!is_sampling_event(event)) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
                 * If we have a PMU initialized but no APIC
                 * interrupts, we cannot sample hardware
                 * events (user-space has to fall back and
                 * sample via an hrtimer-based software event):
        if (attr->type == PERF_TYPE_RAW)
                return x86_pmu_extra_regs(event->attr.config, event);
        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);
        if (attr->config >= x86_pmu.max_events)
        config = x86_pmu.event_map(attr->config);
        if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !attr->freq && hwc->sample_period == 1) {
                /* BTS is not supported by this architecture. */
                if (!x86_pmu.bts_active)
                /* BTS is currently only allowed for user-mode. */
                if (!attr->exclude_kernel)
                if (!attr->exclude_guest)
        hwc->config |= config;
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1 which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
static inline int precise_br_compat(struct perf_event *event)
        u64 m = event->attr.branch_sample_type;
        /* must capture all branches */
        if (!(m & PERF_SAMPLE_BRANCH_ANY))
        m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;
        if (!event->attr.exclude_user)
                b |= PERF_SAMPLE_BRANCH_USER;
        if (!event->attr.exclude_kernel)
                b |= PERF_SAMPLE_BRANCH_KERNEL;
         * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
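         * everything else has to match exactly: the requested priv
         * levels (m) must equal those of the event itself (b)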
int x86_pmu_hw_config(struct perf_event *event)
        if (event->attr.precise_ip) {
                if (!event->attr.exclude_guest)
                /* Support for constant skid */
                if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
                        /* Support for IP fixup */
                if (event->attr.precise_ip > precise)
                 * check that PEBS LBR correction does not conflict with
                 * whatever the user is asking with attr->branch_sample_type
                if (event->attr.precise_ip > 1) {
                        u64 *br_type = &event->attr.branch_sample_type;
                        if (has_branch_stack(event)) {
                                if (!precise_br_compat(event))
                                /* branch_sample_type is compatible */
                                 * user did not specify branch_sample_type
                                 *
                                 * For PEBS fixups, we capture all
                                 * the branches at the priv level of the
                                *br_type = PERF_SAMPLE_BRANCH_ANY;
                                if (!event->attr.exclude_user)
                                        *br_type |= PERF_SAMPLE_BRANCH_USER;
                                if (!event->attr.exclude_kernel)
                                        *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
         * (keep 'enabled' bit clear for now)
        event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
         * Count user and OS events unless requested not to
        if (!event->attr.exclude_user)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!event->attr.exclude_kernel)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
        return x86_setup_perfctr(event);
 * Setup the hardware configuration for a given attr_type
static int __x86_pmu_event_init(struct perf_event *event)
        if (!x86_pmu_initialized())
        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                reserve_ds_buffers();
                        atomic_inc(&active_events);
                mutex_unlock(&pmc_reserve_mutex);
        event->destroy = hw_perf_event_destroy;
        event->hw.last_cpu = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;
        return x86_pmu.hw_config(event);
void x86_pmu_disable_all(void)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                rdmsrl(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu_config_addr(idx), val);

static void x86_pmu_disable(struct pmu *pmu)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        if (!x86_pmu_initialized())
        x86_pmu.disable_all();

void x86_pmu_enable_all(int added)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
                if (!test_bit(idx, cpuc->active_mask))
                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
        return event->pmu == &pmu;
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with the lowest-weight events. Keep the current iterator state in
 * struct sched_state.
        int     event;          /* event index */
        int     counter;        /* counter index */
        int     unassigned;     /* number of events to be assigned left */
        unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define SCHED_STATES_MAX        2

        struct event_constraint **constraints;
        struct sched_state      state;
        struct sched_state      saved[SCHED_STATES_MAX];
 * Initialize the iterator that runs through all events and counters.
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
                            int num, int wmin, int wmax)
        memset(sched, 0, sizeof(*sched));
        sched->max_events  = num;
        sched->max_weight  = wmax;
        sched->constraints = c;
        for (idx = 0; idx < num; idx++) {
                if (c[idx]->weight == wmin)
        sched->state.event      = idx;  /* start with min weight */
        sched->state.weight     = wmin;
        sched->state.unassigned = num;

static void perf_sched_save_state(struct perf_sched *sched)
        if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
        sched->saved[sched->saved_states] = sched->state;
        sched->saved_states++;

static bool perf_sched_restore_state(struct perf_sched *sched)
        if (!sched->saved_states)
        sched->saved_states--;
        sched->state = sched->saved[sched->saved_states];
        /* continue with next counter: */
        clear_bit(sched->state.counter++, sched->state.used);
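        /*
         * Restarting from the next counter turns the save/restore pair
         * into a depth-first backtracking search over the possible
         * assignments, bounded by SCHED_STATES_MAX saved states.
         */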
 * Select a counter for the current event to schedule. Return true on
static bool __perf_sched_find_counter(struct perf_sched *sched)
        struct event_constraint *c;
        if (!sched->state.unassigned)
        if (sched->state.event >= sched->max_events)
        c = sched->constraints[sched->state.event];
        /* Prefer fixed purpose counters */
        if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
                idx = INTEL_PMC_IDX_FIXED;
                for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
                        if (!__test_and_set_bit(idx, sched->state.used))
        /* Grab the first unused counter starting with idx */
        idx = sched->state.counter;
        for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
                if (!__test_and_set_bit(idx, sched->state.used))
        sched->state.counter = idx;
                perf_sched_save_state(sched);

static bool perf_sched_find_counter(struct perf_sched *sched)
        while (!__perf_sched_find_counter(sched)) {
                if (!perf_sched_restore_state(sched))
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
static bool perf_sched_next_event(struct perf_sched *sched)
        struct event_constraint *c;
        if (!sched->state.unassigned || !--sched->state.unassigned)
                sched->state.event++;
                if (sched->state.event >= sched->max_events) {
                        sched->state.event = 0;
                        sched->state.weight++;
                        if (sched->state.weight > sched->max_weight)
                c = sched->constraints[sched->state.event];
        } while (c->weight != sched->state.weight);
        sched->state.counter = 0;       /* start with first counter */

 * Assign a counter for each event.
int perf_assign_events(struct event_constraint **constraints, int n,
                       int wmin, int wmax, int *assign)
        struct perf_sched sched;
        perf_sched_init(&sched, constraints, n, wmin, wmax);
                if (!perf_sched_find_counter(&sched))
                assign[sched.state.event] = sched.state.counter;
        } while (perf_sched_next_event(&sched));
        return sched.state.unassigned;
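        /* a zero return thus means every event got a counter */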
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
        struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int i, wmin, wmax, num = 0;
        struct hw_perf_event *hwc;
        bitmap_zero(used_mask, X86_PMC_IDX_MAX);
        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
                c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
         * fastpath, try to reuse previous register
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                __set_bit(hwc->idx, used_mask);
                assign[i] = hwc->idx;
        num = perf_assign_events(constraints, n, wmin, wmax, assign);
         * scheduling failed or is just a simulation,
         * free resources if necessary
        if (!assign || num) {
                for (i = 0; i < n; i++) {
                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
        return num ? -EINVAL : 0;
 * dogrp: true if we must collect sibling events (group)
 * returns the total number of events or an error code
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
        struct perf_event *event;
        max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
        /* current number of events already accepted */
        if (is_x86_event(leader)) {
                cpuc->event_list[n] = leader;
        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                cpuc->event_list[n] = event;
static inline void x86_assign_hw_event(struct perf_event *event,
                                       struct cpu_hw_events *cpuc, int i)
        struct hw_perf_event *hwc = &event->hw;
        hwc->idx = cpuc->assign[i];
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];
        if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
        } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
                hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
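                /*
                 * RDPMC selects fixed-function counters by setting
                 * bit 30 of the counter index in ECX.
                 */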
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base = x86_pmu_event_addr(hwc->idx);
                hwc->event_base_rdpmc = hwc->idx;

static inline int match_prev_assignment(struct hw_perf_event *hwc,
                                        struct cpu_hw_events *cpuc,
        return hwc->idx == cpuc->assign[i] &&
               hwc->last_cpu == smp_processor_id() &&
               hwc->last_tag == cpuc->tags[i];
static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i, added = cpuc->n_added;
        if (!x86_pmu_initialized())
                int n_running = cpuc->n_events - cpuc->n_added;
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
                 *
                 * step1: save events moving to new counters
                 * step2: reprogram moved events into new counters
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                         * we can avoid reprogramming counter if:
                         * - assigned same counter as last time
                         * - running on same CPU as last time
                         * - no other event has used the counter since
                        if (hwc->idx == -1 ||
                            match_prev_assignment(hwc, cpuc, i))
                         * Ensure we don't accidentally enable a stopped
                         * counter simply because we rescheduled.
                        if (hwc->state & PERF_HES_STOPPED)
                                hwc->state |= PERF_HES_ARCH;
                        x86_pmu_stop(event, PERF_EF_UPDATE);
                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)
                        if (hwc->state & PERF_HES_ARCH)
                        x86_pmu_start(event, PERF_EF_RELOAD);
                perf_events_lapic_init();
        x86_pmu.enable_all(added);
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
int x86_perf_event_set_period(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0, idx = hwc->idx;
        if (idx == INTEL_PMC_IDX_FIXED_BTS)
         * If we are way outside a reasonable range then just skip forward:
        if (unlikely(left <= -period)) {
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
        if (unlikely(left <= 0)) {
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
         * Quirk: certain CPUs don't like it if just 1 hw_event is left:
        if (unlikely(left < 2))
        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;
        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future deltas:
        local64_set(&hwc->prev_count, (u64)-left);
        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
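        /*
         * Programming -left makes the counter overflow (and raise the
         * PMI) after exactly 'left' increments: e.g. left = 0x1000 on
         * a 48-bit counter writes 0xfffffffff000.
         */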
         * Due to an erratum on certain CPUs we need
         * a second write to be sure the register
         * is updated properly
        if (x86_pmu.perfctr_second_write) {
                wrmsrl(hwc->event_base,
                       (u64)(-left) & x86_pmu.cntval_mask);
        perf_event_update_userpage(event);
void x86_pmu_enable_event(struct perf_event *event)
        if (__this_cpu_read(cpu_hw_events.enabled))
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);

 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
static int x86_pmu_add(struct perf_event *event, int flags)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
        int assign[X86_PMC_IDX_MAX];
        perf_pmu_disable(event->pmu);
        n0 = cpuc->n_events;
        ret = n = collect_events(cpuc, event, false);
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;
         * If a group event scheduling transaction was started,
         * skip the schedulability test here; it will be performed
         * at commit time (->commit_txn) as a whole.
        if (cpuc->group_flag & PERF_EVENT_TXN)
        ret = x86_pmu.schedule_events(cpuc, n, assign);
         * copy the new assignment; now we know it is possible. It
         * will be used by hw_perf_enable().
        memcpy(cpuc->assign, assign, n*sizeof(int));
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;
        perf_pmu_enable(event->pmu);
static void x86_pmu_start(struct perf_event *event, int flags)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;
        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
        if (WARN_ON_ONCE(idx == -1))
        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
                x86_perf_event_set_period(event);
        event->hw.state = 0;
        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
void perf_event_print_debug(void)
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_events *cpuc;
        unsigned long flags;
        if (!x86_pmu.num_counters)
        local_irq_save(flags);
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_events, cpu);
        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
                rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
                pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
                rdmsrl(x86_pmu_event_addr(idx), pmc_count);
                prev_left = per_cpu(pmc_prev_left[idx], cpu);
                pr_info("CPU#%d: gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d: gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        local_irq_restore(flags);
void x86_pmu_stop(struct perf_event *event, int flags)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                x86_perf_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
static void x86_pmu_del(struct perf_event *event, int flags)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         * If we're called during a txn, we don't need to do anything.
         * The events never got scheduled and ->cancel_txn will truncate
         * the event list.
        if (cpuc->group_flag & PERF_EVENT_TXN)
        x86_pmu_stop(event, PERF_EF_UPDATE);
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {
                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, event);
                        while (++i < cpuc->n_events)
                                cpuc->event_list[i-1] = cpuc->event_list[i];
        perf_event_update_userpage(event);
int x86_pmu_handle_irq(struct pt_regs *regs)
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
        int idx, handled = 0;
        cpuc = &__get_cpu_var(cpu_hw_events);
         * Some chipsets need to unmask the LVTPC in a particular spot
         * inside the nmi handler. As a result, the unmasking was pushed
         * into all the nmi handlers.
         *
         * This generic handler doesn't seem to have any issues where the
         * unmasking occurs so it was left at the top.
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask)) {
                         * Though we deactivated the counter, some CPUs
                         * might still deliver spurious interrupts that
                         * are still in flight. Catch them:
                        if (__test_and_clear_bit(idx, cpuc->running))
                event = cpuc->events[idx];
                val = x86_perf_event_update(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
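                /*
                 * The counter counts up from a negative start value; if
                 * the top bit is still set it has not overflowed yet,
                 * so this counter did not raise the NMI.
                 */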
                perf_sample_data_init(&data, 0, event->hw.last_period);
                if (!x86_perf_event_set_period(event))
                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        inc_irq_stat(apic_perf_irqs);
void perf_events_lapic_init(void)
        if (!x86_pmu.apic || !x86_pmu_initialized())
         * Always use NMI for PMU
        apic_write(APIC_LVTPC, APIC_DM_NMI);

static int __kprobes
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
        if (!atomic_read(&active_events))
        return x86_pmu.handle_irq(regs);

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
        unsigned int cpu = (long)hcpu;
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        int ret = NOTIFY_OK;
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                cpuc->kfree_on_online = NULL;
                if (x86_pmu.cpu_prepare)
                        ret = x86_pmu.cpu_prepare(cpu);
                if (x86_pmu.attr_rdpmc)
                        set_in_cr4(X86_CR4_PCE);
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                kfree(cpuc->kfree_on_online);
                if (x86_pmu.cpu_dying)
                        x86_pmu.cpu_dying(cpu);
        case CPU_UP_CANCELED:
                if (x86_pmu.cpu_dead)
                        x86_pmu.cpu_dead(cpu);
static void __init pmu_check_apic(void)
        pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
        pr_info("no hardware sampling interrupt available.\n");

static struct attribute_group x86_pmu_format_group = {

struct perf_pmu_events_attr {
        struct device_attribute attr;

 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 * out of events_attr attributes.
static void __init filter_events(struct attribute **attrs)
        for (i = 0; attrs[i]; i++) {
                if (x86_pmu.event_map(i))
                for (j = i; attrs[j]; j++)
                        attrs[j] = attrs[j + 1];
                /* Check the shifted attr. */
static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
        struct perf_pmu_events_attr *pmu_attr =
                container_of(attr, struct perf_pmu_events_attr, attr);
        u64 config = x86_pmu.event_map(pmu_attr->id);
        return x86_pmu.events_sysfs_show(page, config);

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id)  &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)                                          \
static struct perf_pmu_events_attr EVENT_VAR(_id) = {                   \
        .attr = __ATTR(_name, 0444, events_sysfs_show, NULL),           \
        .id   = PERF_COUNT_HW_##_id,                                    \

EVENT_ATTR(cpu-cycles,                  CPU_CYCLES              );
EVENT_ATTR(instructions,                INSTRUCTIONS            );
EVENT_ATTR(cache-references,            CACHE_REFERENCES        );
EVENT_ATTR(cache-misses,                CACHE_MISSES            );
EVENT_ATTR(branch-instructions,         BRANCH_INSTRUCTIONS     );
EVENT_ATTR(branch-misses,               BRANCH_MISSES           );
EVENT_ATTR(bus-cycles,                  BUS_CYCLES              );
EVENT_ATTR(stalled-cycles-frontend,     STALLED_CYCLES_FRONTEND );
EVENT_ATTR(stalled-cycles-backend,      STALLED_CYCLES_BACKEND  );
EVENT_ATTR(ref-cycles,                  REF_CPU_CYCLES          );

static struct attribute *empty_attrs;

static struct attribute *events_attr[] = {
        EVENT_PTR(CPU_CYCLES),
        EVENT_PTR(INSTRUCTIONS),
        EVENT_PTR(CACHE_REFERENCES),
        EVENT_PTR(CACHE_MISSES),
        EVENT_PTR(BRANCH_INSTRUCTIONS),
        EVENT_PTR(BRANCH_MISSES),
        EVENT_PTR(BUS_CYCLES),
        EVENT_PTR(STALLED_CYCLES_FRONTEND),
        EVENT_PTR(STALLED_CYCLES_BACKEND),
        EVENT_PTR(REF_CPU_CYCLES),

static struct attribute_group x86_pmu_events_group = {
        .attrs = events_attr,
ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
        u64 umask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        u64 cmask = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
        bool edge = (config & ARCH_PERFMON_EVENTSEL_EDGE);
        bool pc   = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
        bool any  = (config & ARCH_PERFMON_EVENTSEL_ANY);
        bool inv  = (config & ARCH_PERFMON_EVENTSEL_INV);
         * We have a whole page to spend and only a little data to
         * write, so we can safely use sprintf.
        ret = sprintf(page, "event=0x%02llx", event);
                ret += sprintf(page + ret, ",umask=0x%02llx", umask);
                ret += sprintf(page + ret, ",edge");
                ret += sprintf(page + ret, ",pc");
                ret += sprintf(page + ret, ",any");
                ret += sprintf(page + ret, ",inv");
                ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
        ret += sprintf(page + ret, "\n");
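        /*
         * E.g. a config of 0x03c0 renders as "event=0xc0,umask=0x03";
         * fields that are zero (other than the event code) are omitted.
         */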
static int __init init_hw_perf_events(void)
        struct x86_pmu_quirk *quirk;
        pr_info("Performance Events: ");
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                err = intel_pmu_init();
        case X86_VENDOR_AMD:
                err = amd_pmu_init();
                pr_cont("no PMU driver, software events only.\n");
        /* sanity check that the hardware exists or is emulated */
        if (!check_hw_exists())
        pr_cont("%s PMU driver.\n", x86_pmu.name);
        for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
        if (!x86_pmu.intel_ctrl)
                x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
        perf_events_lapic_init();
        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
        unconstrained = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
                                   0, x86_pmu.num_counters, 0);
        x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
        x86_pmu_format_group.attrs = x86_pmu.format_attrs;
        if (!x86_pmu.events_sysfs_show)
                x86_pmu_events_group.attrs = &empty_attrs;
                filter_events(x86_pmu_events_group.attrs);
        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
        pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
        pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
        pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
        pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
        perf_cpu_notifier(x86_pmu_notifier);
early_initcall(init_hw_perf_events);
static inline void x86_pmu_read(struct perf_event *event)
        x86_perf_event_update(event);

 * Start a group event scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test; it will be performed at commit time.
static void x86_pmu_start_txn(struct pmu *pmu)
        perf_pmu_disable(pmu);
        __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
        __this_cpu_write(cpu_hw_events.n_txn, 0);

 * Stop a group event scheduling transaction.
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
static void x86_pmu_cancel_txn(struct pmu *pmu)
        __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
         * Truncate the collected events.
        __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
        __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
        perf_pmu_enable(pmu);

 * Commit a group event scheduling transaction.
 * Perform the group schedulability test as a whole;
 * return 0 on success.
static int x86_pmu_commit_txn(struct pmu *pmu)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int assign[X86_PMC_IDX_MAX];
        if (!x86_pmu_initialized())
        ret = x86_pmu.schedule_events(cpuc, n, assign);
         * copy the new assignment; now we know it is possible. It
         * will be used by hw_perf_enable().
        memcpy(cpuc->assign, assign, n*sizeof(int));
        cpuc->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
 * a fake_cpuc is used to validate event groups. Due to
 * the extra reg logic, we need to also allocate a fake
 * per_core and per_cpu structure. Otherwise, group events
 * using extra reg may conflict without the kernel being
 * able to catch this when the last event gets added to the group.
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
        kfree(cpuc->shared_regs);

static struct cpu_hw_events *allocate_fake_cpuc(void)
        struct cpu_hw_events *cpuc;
        int cpu = raw_smp_processor_id();
        cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);
        /* only needed, if we have extra_regs */
        if (x86_pmu.extra_regs) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
        free_fake_cpuc(cpuc);
        return ERR_PTR(-ENOMEM);
 * validate that we can schedule this event
static int validate_event(struct perf_event *event)
        struct cpu_hw_events *fake_cpuc;
        struct event_constraint *c;
        fake_cpuc = allocate_fake_cpuc();
        if (IS_ERR(fake_cpuc))
                return PTR_ERR(fake_cpuc);
        c = x86_pmu.get_event_constraints(fake_cpuc, event);
        if (!c || !c->weight)
        if (x86_pmu.put_event_constraints)
                x86_pmu.put_event_constraints(fake_cpuc, event);
        free_fake_cpuc(fake_cpuc);

 * validate a single event group
 *
 * validation includes:
 * - check that the events are compatible with each other
 * - events do not compete for the same counter
 * - number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
static int validate_group(struct perf_event *event)
        struct perf_event *leader = event->group_leader;
        struct cpu_hw_events *fake_cpuc;
        int ret = -EINVAL, n;
        fake_cpuc = allocate_fake_cpuc();
        if (IS_ERR(fake_cpuc))
                return PTR_ERR(fake_cpuc);
         * the event is not yet connected with its
         * siblings; therefore we must first collect
         * existing siblings, then add the new event
         * before we can simulate the scheduling
        n = collect_events(fake_cpuc, leader, true);
        fake_cpuc->n_events = n;
        n = collect_events(fake_cpuc, event, false);
        fake_cpuc->n_events = n;
        ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
        free_fake_cpuc(fake_cpuc);

static int x86_pmu_event_init(struct perf_event *event)
        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
        err = __x86_pmu_event_init(event);
                 * we temporarily connect the event to its pmu
                 * such that validate_group() can classify
                 * it as an x86 event using is_x86_event()
                if (event->group_leader != event)
                        err = validate_group(event);
                        err = validate_event(event);
                        event->destroy(event);
static int x86_pmu_event_idx(struct perf_event *event)
        int idx = event->hw.idx;
        if (!x86_pmu.attr_rdpmc)
        if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
                idx -= INTEL_PMC_IDX_FIXED;
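                /*
                 * Fixed counters are exposed to user-space RDPMC with
                 * bit 30 set in the counter index.
                 */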
static ssize_t get_attr_rdpmc(struct device *cdev,
                              struct device_attribute *attr,
        return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);

static void change_rdpmc(void *info)
        bool enable = !!(unsigned long)info;
                set_in_cr4(X86_CR4_PCE);
                clear_in_cr4(X86_CR4_PCE);

static ssize_t set_attr_rdpmc(struct device *cdev,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
        ret = kstrtoul(buf, 0, &val);
        if (!!val != !!x86_pmu.attr_rdpmc) {
                x86_pmu.attr_rdpmc = !!val;
                smp_call_function(change_rdpmc, (void *)val, 1);

static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);

static struct attribute *x86_pmu_attrs[] = {
        &dev_attr_rdpmc.attr,

static struct attribute_group x86_pmu_attr_group = {
        .attrs = x86_pmu_attrs,

static const struct attribute_group *x86_pmu_attr_groups[] = {
        &x86_pmu_attr_group,
        &x86_pmu_format_group,
        &x86_pmu_events_group,

static void x86_pmu_flush_branch_stack(void)
        if (x86_pmu.flush_branch_stack)
                x86_pmu.flush_branch_stack();

void perf_check_microcode(void)
        if (x86_pmu.check_microcode)
                x86_pmu.check_microcode();
EXPORT_SYMBOL_GPL(perf_check_microcode);
static struct pmu pmu = {
        .pmu_enable             = x86_pmu_enable,
        .pmu_disable            = x86_pmu_disable,
        .attr_groups            = x86_pmu_attr_groups,
        .event_init             = x86_pmu_event_init,
        .start                  = x86_pmu_start,
        .stop                   = x86_pmu_stop,
        .read                   = x86_pmu_read,
        .start_txn              = x86_pmu_start_txn,
        .cancel_txn             = x86_pmu_cancel_txn,
        .commit_txn             = x86_pmu_commit_txn,
        .event_idx              = x86_pmu_event_idx,
        .flush_branch_stack     = x86_pmu_flush_branch_stack,
void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
        userpg->cap_usr_time = 0;
        userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
        userpg->pmc_width = x86_pmu.cntval_bits;
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
        userpg->cap_usr_time = 1;
        userpg->time_mult = this_cpu_read(cyc2ns);
        userpg->time_shift = CYC2NS_SCALE_FACTOR;
        userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
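        /*
         * User-space can then recover the time as, essentially,
         * time_offset + (rdtsc() * time_mult) >> time_shift; this is
         * only valid because a constant, non-stop TSC was verified
         * above.
         */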
static int backtrace_stack(void *data, char *name)

static void backtrace_address(void *data, unsigned long addr, int reliable)
        struct perf_callchain_entry *entry = data;
        perf_callchain_store(entry, addr);

static const struct stacktrace_ops backtrace_ops = {
        .stack          = backtrace_stack,
        .address        = backtrace_address,
        .walk_stack     = print_context_stack_bp,

perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
        perf_callchain_store(entry, regs->ip);
        dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);

valid_user_frame(const void __user *fp, unsigned long size)
        return (__range_not_ok(fp, size, TASK_SIZE) == 0);

static unsigned long get_segment_base(unsigned int segment)
        struct desc_struct *desc;
        int idx = segment >> 3;
        if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
                if (idx > LDT_ENTRIES)
                if (idx > current->active_mm->context.size)
                desc = current->active_mm->context.ldt;
                if (idx > GDT_ENTRIES)
                desc = __this_cpu_ptr(&gdt_page.gdt[0]);
        return get_desc_base(desc + idx);
#ifdef CONFIG_COMPAT

#include <asm/compat.h>

perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
        /* 32-bit process in 64-bit kernel. */
        unsigned long ss_base, cs_base;
        struct stack_frame_ia32 frame;
        const void __user *fp;
        if (!test_thread_flag(TIF_IA32))
        cs_base = get_segment_base(regs->cs);
        ss_base = get_segment_base(regs->ss);
        fp = compat_ptr(ss_base + regs->bp);
        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                unsigned long bytes;
                frame.next_frame = 0;
                frame.return_address = 0;
                bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
                if (bytes != sizeof(frame))
                if (!valid_user_frame(fp, sizeof(frame)))
                perf_callchain_store(entry, cs_base + frame.return_address);
                fp = compat_ptr(ss_base + frame.next_frame);
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)

perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
        struct stack_frame frame;
        const void __user *fp;
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
         * We don't know what to do with VM86 stacks... ignore them for now.
        if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
        fp = (void __user *)regs->bp;
        perf_callchain_store(entry, regs->ip);
        if (perf_callchain_user32(regs, entry))
        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                unsigned long bytes;
                frame.next_frame = NULL;
                frame.return_address = 0;
                bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
                if (bytes != sizeof(frame))
                if (!valid_user_frame(fp, sizeof(frame)))
                perf_callchain_store(entry, frame.return_address);
                fp = frame.next_frame;
 * Deal with code segment offsets for the various execution modes:
 *
 *   VM86 - the good olde 16 bit days, where the linear address is
 *          20 bits and we use regs->ip + 0x10 * regs->cs.
 *
 *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
 *          to figure out what the 32bit base address is.
 *
 *    X32 - has TIF_X32 set, but is running in x86_64
 *
 * X86_64 - CS,DS,SS,ES are all zero based.
static unsigned long code_segment_base(struct pt_regs *regs)
         * If we are in VM86 mode, add the segment offset to convert to a
         * linear address.
        if (regs->flags & X86_VM_MASK)
                return 0x10 * regs->cs;
         * For IA32 we look at the GDT/LDT segment base to convert the
         * effective IP to a linear address.
#ifdef CONFIG_X86_32
        if (user_mode(regs) && regs->cs != __USER_CS)
                return get_segment_base(regs->cs);
        if (test_thread_flag(TIF_IA32)) {
                if (user_mode(regs) && regs->cs != __USER32_CS)
                        return get_segment_base(regs->cs);
unsigned long perf_instruction_pointer(struct pt_regs *regs)
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
                return perf_guest_cbs->get_guest_ip();
        return regs->ip + code_segment_base(regs);

unsigned long perf_misc_flags(struct pt_regs *regs)
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                if (perf_guest_cbs->is_user_mode())
                        misc |= PERF_RECORD_MISC_GUEST_USER;
                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;
                if (user_mode(regs))
                        misc |= PERF_RECORD_MISC_USER;
                        misc |= PERF_RECORD_MISC_KERNEL;
        if (regs->flags & PERF_EFLAGS_EXACT)
                misc |= PERF_RECORD_MISC_EXACT_IP;

void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
        cap->version            = x86_pmu.version;
        cap->num_counters_gp    = x86_pmu.num_counters;
        cap->num_counters_fixed = x86_pmu.num_counters_fixed;
        cap->bit_width_gp       = x86_pmu.cntval_bits;
        cap->bit_width_fixed    = x86_pmu.cntval_bits;
        cap->events_mask        = (unsigned int)x86_pmu.events_maskl;
        cap->events_mask_len    = x86_pmu.events_mask_len;
EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);