2 * Performance events - AMD IBS
4 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
6 * For licensing details see kernel-base/COPYING
9 #include <linux/perf_event.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/ptrace.h>
16 #include "perf_event.h"
20 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
22 #include <linux/kprobes.h>
23 #include <linux/hardirq.h>
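/*
 * Config bits a user may set in the raw event config of the two IBS
 * pmus; anything outside these masks is rejected in perf_ibs_init().
 */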
27 #define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
28 #define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
39 struct perf_event *event;
40 unsigned long state[BITS_TO_LONGS(IBS_MAX_STATES)];
51 unsigned long offset_mask[1];
53 struct cpu_perf_ibs __percpu *pcpu;
54 u64 (*get_count)(u64 config);
57 struct perf_ibs_data {
60 u32 data[0]; /* data buffer starts here */
63 u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
67 perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
69 s64 left = local64_read(&hwc->period_left);
70 s64 period = hwc->sample_period;
74 * If we are way outside a reasonable range then just skip forward:
76 if (unlikely(left <= -period)) {
78 local64_set(&hwc->period_left, left);
79 hwc->last_period = period;
83 if (unlikely(left < (s64)min)) {
85 local64_set(&hwc->period_left, left);
86 hwc->last_period = period;
91 * If the hw period that triggers the sw overflow is too short
92 * we might end up sampling inside the irq handler, which biases the results.
93 * Thus we shorten the next-to-last period and set the last
94 * period to the max period.
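 *
 * Example with hypothetical values: if max = 0x1000 and 0x1010 is left,
 * a naive split would program 0x1000 now and a too-short 0x10 last;
 * instead we program the short 0x10 period now and leave the full
 * 0x1000 for the last period.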
104 *hw_period = (u64)left;
110 perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
112 struct hw_perf_event *hwc = &event->hw;
113 int shift = 64 - width;
118 * Careful: an NMI might modify the previous event value.
120 * Our tactic to handle this is to first atomically read and
121 * exchange a new raw count - then add that new-prev delta
122 * count to the generic event atomically:
124 prev_raw_count = local64_read(&hwc->prev_count);
125 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
126 new_raw_count) != prev_raw_count)
130 * Now we have the new raw value and have updated the prev
131 * timestamp already. We can now calculate the elapsed delta
132 * (event-)time and add that to the generic event.
134 * Careful, not all hw sign-extends above the physical width
137 delta = (new_raw_count << shift) - (prev_raw_count << shift);
140 local64_add(delta, &event->count);
141 local64_sub(delta, &hwc->period_left);
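/*
 * A note on the shift above: shifting left by (64 - width) and back
 * computes the delta modulo the hardware counter width. A minimal
 * sketch, assuming a hypothetical 48-bit wide counter:
 *
 *	shift = 64 - 48;
 *	delta = (new_raw_count << shift) - (prev_raw_count << shift);
 *	delta >>= shift;
 *
 * IBS calls this with width = 64, so the shift is a no-op here.
 */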
146 static struct perf_ibs perf_ibs_fetch;
147 static struct perf_ibs perf_ibs_op;
149 static struct perf_ibs *get_ibs_pmu(int type)
151 if (perf_ibs_fetch.pmu.type == type)
152 return &perf_ibs_fetch;
153 if (perf_ibs_op.pmu.type == type)
159 * Use IBS for precise event sampling:
161 * perf record -a -e cpu-cycles:p ... # use ibs op counting cycle count
162 * perf record -a -e r076:p ... # same as -e cpu-cycles:p
163 * perf record -a -e r0C1:p ... # use ibs op counting micro-ops
165 * IbsOpCntCtl (bit 19) of the IBS Execution Control Register (IbsOpCtl,
166 * MSRC001_1033) is used to select either cycle or micro-op counting
169 * The rip of IBS samples has skid 0. Thus, IBS supports precise
170 * levels 1 and 2 and PERF_EFLAGS_EXACT is set. In rare cases the
171 * rip is invalid because IBS was not able to record it correctly;
172 * we then clear PERF_EFLAGS_EXACT and take the rip from pt_regs.
175 static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
177 switch (event->attr.precise_ip) {
187 switch (event->attr.type) {
188 case PERF_TYPE_HARDWARE:
189 switch (event->attr.config) {
190 case PERF_COUNT_HW_CPU_CYCLES:
196 switch (event->attr.config) {
201 *config = IBS_OP_CNT_CTL;
212 static int perf_ibs_init(struct perf_event *event)
214 struct hw_perf_event *hwc = &event->hw;
215 struct perf_ibs *perf_ibs;
219 perf_ibs = get_ibs_pmu(event->attr.type);
221 config = event->attr.config;
223 perf_ibs = &perf_ibs_op;
224 ret = perf_ibs_precise_event(event, &config);
229 if (event->pmu != &perf_ibs->pmu)
232 if (config & ~perf_ibs->config_mask)
235 if (hwc->sample_period) {
236 if (config & perf_ibs->cnt_mask)
237 /* raw max_cnt may not be set */
239 if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
241 * the lower 4 bits cannot be set in the ibs max cnt field,
242 * but allow them here in case we adjust the
243 * sample period to set a frequency.
246 hwc->sample_period &= ~0x0FULL;
247 if (!hwc->sample_period)
248 hwc->sample_period = 0x10;
250 max_cnt = config & perf_ibs->cnt_mask;
251 config &= ~perf_ibs->cnt_mask;
252 event->attr.sample_period = max_cnt << 4;
253 hwc->sample_period = event->attr.sample_period;
256 if (!hwc->sample_period)
260 * If we modify hwc->sample_period, we also need to update
261 * hwc->last_period and hwc->period_left.
263 hwc->last_period = hwc->sample_period;
264 local64_set(&hwc->period_left, hwc->sample_period);
266 hwc->config_base = perf_ibs->msr;
267 hwc->config = config;
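/*
 * Note that the hardware max count field is programmed in units of 16,
 * i.e. it holds sample_period >> 4: a (hypothetical) sample_period of
 * 0x1000 becomes a max cnt of 0x100. That is why the low 4 bits of the
 * period are masked off above and why a zero period is bumped to 0x10.
 */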
272 static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
273 struct hw_perf_event *hwc, u64 *period)
277 /* ignore lower 4 bits in min count: */
278 overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
279 local64_set(&hwc->prev_count, 0);
284 static u64 get_ibs_fetch_count(u64 config)
286 return (config & IBS_FETCH_CNT) >> 12;
289 static u64 get_ibs_op_count(u64 config)
293 if (config & IBS_OP_VAL)
294 count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */
296 if (ibs_caps & IBS_CAPS_RDWROPCNT)
297 count += (config & IBS_OP_CUR_CNT) >> 32;
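/*
 * The op counter state is read back from IbsOpCtl: if the valid bit
 * (IBS_OP_VAL) is set the counter has rolled over, so a full period of
 * max_cnt << 4 ops is added; if the cpu supports reading the current
 * count (IBS_CAPS_RDWROPCNT) it sits in the upper half of the register,
 * hence the shift by 32.
 */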
303 perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
306 u64 count = perf_ibs->get_count(*config);
309 * Set width to 64 since we do not overflow on max width but
310 * instead on max count. In perf_ibs_set_period() we clear
311 * prev count manually on overflow.
313 while (!perf_event_try_update(event, count, 64)) {
314 rdmsrl(event->hw.config_base, *config);
315 count = perf_ibs->get_count(*config);
319 static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
320 struct hw_perf_event *hwc, u64 config)
322 wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
326 * Erratum #420 Instruction-Based Sampling Engine May Generate
327 * Interrupt that Cannot Be Cleared:
329 * Must clear counter mask first, then clear the enable bit. See
330 * Revision Guide for AMD Family 10h Processors, Publication #41322.
332 static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
333 struct hw_perf_event *hwc, u64 config)
335 config &= ~perf_ibs->cnt_mask;
336 wrmsrl(hwc->config_base, config);
337 config &= ~perf_ibs->enable_mask;
338 wrmsrl(hwc->config_base, config);
342 * We cannot restore the ibs pmu state, so we always need to update
343 * the event while stopping it and then reset the state when starting
344 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
345 * in perf_ibs_start()/perf_ibs_stop() and always do the update.
347 static void perf_ibs_start(struct perf_event *event, int flags)
349 struct hw_perf_event *hwc = &event->hw;
350 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
351 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
354 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
357 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
360 perf_ibs_set_period(perf_ibs, hwc, &period);
361 set_bit(IBS_STARTED, pcpu->state);
362 perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
364 perf_event_update_userpage(event);
367 static void perf_ibs_stop(struct perf_event *event, int flags)
369 struct hw_perf_event *hwc = &event->hw;
370 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
371 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
375 stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);
377 if (!stopping && (hwc->state & PERF_HES_UPTODATE))
380 rdmsrl(hwc->config_base, config);
383 set_bit(IBS_STOPPING, pcpu->state);
384 perf_ibs_disable_event(perf_ibs, hwc, config);
385 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
386 hwc->state |= PERF_HES_STOPPED;
389 if (hwc->state & PERF_HES_UPTODATE)
393 * Clear the valid bit so that rollovers are not counted on update;
394 * rollovers are only accounted for in the irq handler.
396 config &= ~perf_ibs->valid_mask;
398 perf_ibs_event_update(perf_ibs, event, &config);
399 hwc->state |= PERF_HES_UPTODATE;
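/*
 * pcpu->state tracks the per-cpu IBS life cycle via the IBS_ENABLED,
 * IBS_STARTED and IBS_STOPPING bits; the NMI handler relies on
 * IBS_STOPPING to acknowledge spurious NMIs that arrive after an
 * event has been stopped.
 */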
402 static int perf_ibs_add(struct perf_event *event, int flags)
404 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
405 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
407 if (test_and_set_bit(IBS_ENABLED, pcpu->state))
410 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
414 if (flags & PERF_EF_START)
415 perf_ibs_start(event, PERF_EF_RELOAD);
420 static void perf_ibs_del(struct perf_event *event, int flags)
422 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
423 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
425 if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
428 perf_ibs_stop(event, PERF_EF_UPDATE);
432 perf_event_update_userpage(event);
435 static void perf_ibs_read(struct perf_event *event) { }
437 static struct perf_ibs perf_ibs_fetch = {
439 .task_ctx_nr = perf_invalid_context,
441 .event_init = perf_ibs_init,
444 .start = perf_ibs_start,
445 .stop = perf_ibs_stop,
446 .read = perf_ibs_read,
448 .msr = MSR_AMD64_IBSFETCHCTL,
449 .config_mask = IBS_FETCH_CONFIG_MASK,
450 .cnt_mask = IBS_FETCH_MAX_CNT,
451 .enable_mask = IBS_FETCH_ENABLE,
452 .valid_mask = IBS_FETCH_VAL,
453 .max_period = IBS_FETCH_MAX_CNT << 4,
454 .offset_mask = { MSR_AMD64_IBSFETCH_REG_MASK },
455 .offset_max = MSR_AMD64_IBSFETCH_REG_COUNT,
457 .get_count = get_ibs_fetch_count,
460 static struct perf_ibs perf_ibs_op = {
462 .task_ctx_nr = perf_invalid_context,
464 .event_init = perf_ibs_init,
467 .start = perf_ibs_start,
468 .stop = perf_ibs_stop,
469 .read = perf_ibs_read,
471 .msr = MSR_AMD64_IBSOPCTL,
472 .config_mask = IBS_OP_CONFIG_MASK,
473 .cnt_mask = IBS_OP_MAX_CNT,
474 .enable_mask = IBS_OP_ENABLE,
475 .valid_mask = IBS_OP_VAL,
476 .max_period = IBS_OP_MAX_CNT << 4,
477 .offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
478 .offset_max = MSR_AMD64_IBSOP_REG_COUNT,
480 .get_count = get_ibs_op_count,
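/*
 * Two pmus are exposed: "ibs_fetch" samples instruction fetches via
 * IbsFetchCtl and "ibs_op" samples micro-ops via IbsOpCtl; apart from
 * the MSRs and bit masks above they share the same callbacks.
 */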
483 static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
485 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
486 struct perf_event *event = pcpu->event;
487 struct hw_perf_event *hwc = &event->hw;
488 struct perf_sample_data data;
489 struct perf_raw_record raw;
491 struct perf_ibs_data ibs_data;
492 int offset, size, check_rip, offset_max, throttle = 0;
494 u64 *buf, *config, period;
496 if (!test_bit(IBS_STARTED, pcpu->state)) {
498 * Catch spurious interrupts after stopping IBS: after
499 * disabling IBS there could still be incoming NMIs
500 * with samples that even have the valid bit cleared.
501 * Mark all these NMIs as handled.
503 return test_and_clear_bit(IBS_STOPPING, pcpu->state) ? 1 : 0;
506 msr = hwc->config_base;
509 if (!(*buf++ & perf_ibs->valid_mask))
512 config = &ibs_data.regs[0];
513 perf_ibs_event_update(perf_ibs, event, config);
514 perf_sample_data_init(&data, 0, hwc->last_period);
515 if (!perf_ibs_set_period(perf_ibs, hwc, &period))
516 goto out; /* no sw counter overflow */
518 ibs_data.caps = ibs_caps;
521 check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
522 if (event->attr.sample_type & PERF_SAMPLE_RAW)
523 offset_max = perf_ibs->offset_max;
529 rdmsrl(msr + offset, *buf++);
531 offset = find_next_bit(perf_ibs->offset_mask,
532 perf_ibs->offset_max,
534 } while (offset < offset_max);
535 ibs_data.size = sizeof(u64) * size;
538 if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
539 regs.flags &= ~PERF_EFLAGS_EXACT;
541 set_linear_ip(&regs, ibs_data.regs[1]);
542 regs.flags |= PERF_EFLAGS_EXACT;
545 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
546 raw.size = sizeof(u32) + ibs_data.size;
547 raw.data = ibs_data.data;
551 throttle = perf_event_overflow(event, &data, &regs);
554 perf_ibs_disable_event(perf_ibs, hwc, *config);
556 perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
558 perf_event_update_userpage(event);
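/*
 * ibs_data.regs[] holds the IBS MSRs in register order, so for the op
 * pmu regs[0] is IbsOpCtl, regs[1] IbsOpRip and regs[2] IbsOpData
 * (assumed from the MSR numbering); that is why the rip is taken from
 * regs[1] and the RipInvalid check looks at regs[2].
 */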
564 perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
568 handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
569 handled += perf_ibs_handle_irq(&perf_ibs_op, regs);
572 inc_irq_stat(apic_perf_irqs);
577 static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
579 struct cpu_perf_ibs __percpu *pcpu;
582 pcpu = alloc_percpu(struct cpu_perf_ibs);
586 perf_ibs->pcpu = pcpu;
588 ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
590 perf_ibs->pcpu = NULL;
597 static __init int perf_event_ibs_init(void)
600 return -ENODEV; /* ibs not supported by the cpu */
602 perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
603 if (ibs_caps & IBS_CAPS_OPCNT)
604 perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
605 perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
606 register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
607 printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
612 #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
614 static __init int perf_event_ibs_init(void) { return 0; }
618 /* IBS - apic initialization, for perf and oprofile */
620 static __init u32 __get_ibs_caps(void)
623 unsigned int max_level;
625 if (!boot_cpu_has(X86_FEATURE_IBS))
628 /* check IBS cpuid feature flags */
629 max_level = cpuid_eax(0x80000000);
630 if (max_level < IBS_CPUID_FEATURES)
631 return IBS_CAPS_DEFAULT;
633 caps = cpuid_eax(IBS_CPUID_FEATURES);
634 if (!(caps & IBS_CAPS_AVAIL))
635 /* cpuid flags not valid */
636 return IBS_CAPS_DEFAULT;
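/*
 * The returned caps value is the raw EAX of the IBS_CPUID_FEATURES
 * leaf, or IBS_CAPS_DEFAULT when the leaf is missing or not valid;
 * the bits used in this file are IBS_CAPS_AVAIL, IBS_CAPS_OPCNT,
 * IBS_CAPS_RDWROPCNT and IBS_CAPS_RIPINVALIDCHK.
 */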
641 u32 get_ibs_caps(void)
646 EXPORT_SYMBOL(get_ibs_caps);
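/*
 * get_eilvt()/put_eilvt() use setup_APIC_eilvt() with a dummy, masked
 * entry to reserve and release an extended interrupt LVT offset; the
 * call fails if the offset is already in use with different settings,
 * so get_eilvt() returns true when the offset could be claimed.
 */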
648 static inline int get_eilvt(int offset)
650 return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
653 static inline int put_eilvt(int offset)
655 return !setup_APIC_eilvt(offset, 0, 0, 1);
659 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
661 static inline int ibs_eilvt_valid(void)
669 rdmsrl(MSR_AMD64_IBSCTL, val);
670 offset = val & IBSCTL_LVT_OFFSET_MASK;
672 if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
673 pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
674 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
678 if (!get_eilvt(offset)) {
679 pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
680 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
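/*
 * setup_ibs_ctl() below writes the chosen LVT offset, together with the
 * valid bit, into the IBSCTL register of every node's northbridge via
 * PCI config space, and reads it back to verify that the write took
 * effect.
 */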
691 static int setup_ibs_ctl(int ibs_eilvt_off)
693 struct pci_dev *cpu_cfg;
700 cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
701 PCI_DEVICE_ID_AMD_10H_NB_MISC,
706 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
707 | IBSCTL_LVT_OFFSET_VALID);
708 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
709 if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
710 pci_dev_put(cpu_cfg);
711 printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
712 "IBSCTL = 0x%08x\n", value);
718 printk(KERN_DEBUG "No CPU node configured for IBS\n");
726 * This runs only on the current cpu. We try to find an LVT offset and
727 * set up the local APIC. For this we must disable preemption. On
728 * success we initialize all nodes with this offset, which then updates
729 * the offset in the per-node IBS_CTL msr. The per-core APIC setup of
730 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
731 * uses the new offset.
733 static int force_ibs_eilvt_setup(void)
739 /* find the next free EILVT entry, skipping offset 0 */
740 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
741 if (get_eilvt(offset))
746 if (offset == APIC_EILVT_NR_MAX) {
747 printk(KERN_DEBUG "No EILVT entry available\n");
751 ret = setup_ibs_ctl(offset);
755 if (!ibs_eilvt_valid()) {
760 pr_info("IBS: LVT offset %d assigned\n", offset);
770 static inline int get_ibs_lvt_offset(void)
774 rdmsrl(MSR_AMD64_IBSCTL, val);
775 if (!(val & IBSCTL_LVT_OFFSET_VALID))
778 return val & IBSCTL_LVT_OFFSET_MASK;
781 static void setup_APIC_ibs(void *dummy)
785 offset = get_ibs_lvt_offset();
789 if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
792 pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
796 static void clear_APIC_ibs(void *dummy)
800 offset = get_ibs_lvt_offset();
802 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
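/*
 * The cpu notifier programs the IBS LVT entry when a cpu comes online
 * and clears it again when the cpu is taken down.
 */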
806 perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
808 switch (action & ~CPU_TASKS_FROZEN) {
810 setup_APIC_ibs(NULL);
813 clear_APIC_ibs(NULL);
822 static __init int amd_ibs_init(void)
827 caps = __get_ibs_caps();
829 return -ENODEV; /* ibs not supported by the cpu */
832 * Force LVT offset assignment for family 10h: The offsets are
833 * not assigned by the BIOS for this family, so the OS is
834 * responsible for doing it. If the OS assignment fails, fall
835 * back to the BIOS settings and try to set things up with those.
837 if (boot_cpu_data.x86 == 0x10)
838 force_ibs_eilvt_setup();
840 if (!ibs_eilvt_valid())
845 /* make ibs_caps visible to other cpus: */
847 perf_cpu_notifier(perf_ibs_cpu_notifier);
848 smp_call_function(setup_APIC_ibs, NULL, 1);
851 ret = perf_event_ibs_init();
854 pr_err("Failed to setup IBS, %d\n", ret);
858 /* Since we need the pci subsystem to init ibs, we can't do this earlier: */
859 device_initcall(amd_ibs_init);