/*
 * Hardware performance events for the Alpha.
 *
 * We implement HW counts on the EV67 and subsequent CPUs only.
 *
 * (C) 2010 Michael J. Cree
 *
 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
 * ARM code, which are copyright by their respective authors.
 */
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/hwrpb.h>
#include <asm/atomic.h>
#include <asm/irq_regs.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>
/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1
/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
	int			enabled;
	/* Number of events scheduled; also the number of entries valid in the arrays below. */
	int			n_events;
	/* Number of events added since the last hw_perf_disable(). */
	int			n_added;
	/* Events currently scheduled. */
	struct perf_event	*event[MAX_HWEVENTS];
	/* Event type of each scheduled event. */
	unsigned long		evtype[MAX_HWEVENTS];
	/* Current index of each scheduled event; if not yet determined
	 * contains PMC_NO_INDEX.
	 */
	int			current_idx[MAX_HWEVENTS];
	/* The active PMCs' config for easy use with wrperfmon(). */
	unsigned long		config;
	/* The active counters' indices for easy use with wrperfmon(). */
	unsigned long		idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
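/*
 * For illustration only (not part of the original layout): with instructions
 * scheduled on PMC0 and cycles on PMC1, the per-CPU state would end up
 * roughly as
 *
 *	n_events      = 2
 *	evtype[]      = { EV67_INSTRUCTIONS, EV67_CYCLES }
 *	current_idx[] = { 0, 1 }
 *	config        = EV67_PCTR_INSTR_CYCLES
 *	idx_mask      = (1UL << 0) | (1UL << 1)
 *
 * and config/idx_mask are handed straight to wrperfmon() in hw_perf_enable().
 */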
/*
 * A structure to hold the description of the PMCs available on a particular
 * type of Alpha CPU.
 */
struct alpha_pmu_t {
	/* Mapping of the perf system hw event types to indigenous event types */
	const int *event_map;
	/* The number of entries in the event_map */
	int max_events;
	/* The number of PMCs on this Alpha */
	int num_pmcs;
	/* All PMC counters reside in the IBOX register PCTR.  This is the
	 * LSB of the counter.
	 */
	int pmc_count_shift[MAX_HWEVENTS];
	/* The mask that isolates the PMC bits when the LSB of the counter
	 * is shifted to bit 0.
	 */
	unsigned long pmc_count_mask[MAX_HWEVENTS];
	/* The maximum period the PMC can count. */
	unsigned long pmc_max_period[MAX_HWEVENTS];
	/* The maximum value that may be written to the counter due to
	 * hardware restrictions is pmc_max_period - pmc_left.
	 */
	int pmc_left[3];
	/* Subroutine for allocation of PMCs.  Enforces constraints. */
	int (*check_constraints)(struct perf_event **, unsigned long *, int);
};
/*
 * The Alpha CPU PMU description currently in operation.  This is set during
 * the boot process to the specific CPU of the machine.
 */
static const struct alpha_pmu_t *alpha_pmu;

#define HW_OP_UNSUPPORTED -1
/*
 * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
 * follow.  Since they are identical we refer to them collectively as the
 * EV67 henceforth.
 */

/*
 * EV67 PMC event types
 *
 * There is no one-to-one mapping of the possible hw event types to the
 * actual codes that are used to program the PMCs, hence we introduce our
 * own hw event type identifiers.
 */
enum ev67_pmc_event_type {
	EV67_CYCLES = 1,
	EV67_INSTRUCTIONS,
	EV67_BCACHEMISS,
	EV67_MBOXREPLAY,
	EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)
/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	 = EV67_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	 = EV67_INSTRUCTIONS,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	 = EV67_BCACHEMISS,
};
struct ev67_mapping_t {
	int config;
	int idx;
};

/*
 * The mapping used for one event only - these must be in the same order as
 * the enum ev67_pmc_event_type definition.
 */
static const struct ev67_mapping_t ev67_mapping[] = {
	{EV67_PCTR_INSTR_CYCLES, 1},	 /* EV67_CYCLES */
	{EV67_PCTR_INSTR_CYCLES, 0},	 /* EV67_INSTRUCTIONS */
	{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
	{EV67_PCTR_CYCLES_MBOX, 1}	 /* EV67_MBOXREPLAY */
};
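/*
 * For example, a lone EV67_BCACHEMISS event resolves (in
 * ev67_check_constraints() below) to
 *
 *	config = ev67_mapping[EV67_BCACHEMISS - 1].config;  i.e. EV67_PCTR_INSTR_BCACHEMISS
 *	idx    = ev67_mapping[EV67_BCACHEMISS - 1].idx;      i.e. PMC 1
 *
 * the "- 1" being needed because EV67_CYCLES, the first enumerator, is 1.
 */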
/*
 * Check that a group of events can be simultaneously scheduled on to the
 * EV67 PMU.  Also allocate counter indices and config.
 */
static int ev67_check_constraints(struct perf_event **event,
				unsigned long *evtype, int n_ev)
{
	int idx0;
	unsigned long config;

	idx0 = ev67_mapping[evtype[0]-1].idx;
	config = ev67_mapping[evtype[0]-1].config;
	if (n_ev == 1)
		goto success;

	BUG_ON(n_ev != 2);

	if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
		/* MBOX replay traps must be on PMC 1. */
		idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
		/* Only cycles can accompany MBOX replay traps. */
		if (evtype[idx0] == EV67_CYCLES) {
			config = EV67_PCTR_CYCLES_MBOX;
			goto success;
		}
	}

	if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
		/* Bcache misses must be on PMC 1. */
		idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
		/* Only instructions can accompany Bcache misses. */
		if (evtype[idx0] == EV67_INSTRUCTIONS) {
			config = EV67_PCTR_INSTR_BCACHEMISS;
			goto success;
		}
	}

	if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
		/* Instructions must be on PMC 0. */
		idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
		/* By this point only cycles can accompany instructions. */
		if (evtype[idx0^1] == EV67_CYCLES) {
			config = EV67_PCTR_INSTR_CYCLES;
			goto success;
		}
	}

	/* Otherwise, darn it, there is a conflict. */
	return -1;

success:
	event[0]->hw.idx = idx0;
	event[0]->hw.config_base = config;
	if (n_ev == 2) {
		event[1]->hw.idx = idx0 ^ 1;
		event[1]->hw.config_base = config;
	}
	return 0;
}
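/*
 * Summarising the constraints above, the only two-event groups that can be
 * scheduled together are:
 *
 *	PMC0			PMC1			config
 *	EV67_INSTRUCTIONS	EV67_CYCLES		EV67_PCTR_INSTR_CYCLES
 *	EV67_INSTRUCTIONS	EV67_BCACHEMISS		EV67_PCTR_INSTR_BCACHEMISS
 *	EV67_CYCLES		EV67_MBOXREPLAY		EV67_PCTR_CYCLES_MBOX
 *
 * Anything else takes the conflict return above.
 */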
static const struct alpha_pmu_t ev67_pmu = {
	.event_map = ev67_perfmon_event_map,
	.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
	.num_pmcs = 2,
	.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
	.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
	.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
	.pmc_left = {16, 4, 0},
	.check_constraints = ev67_check_constraints
};
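/*
 * A note on the numbers above: both counters are 20 bits wide, so the longest
 * period is 0xFFFFF events, and the pmc_left values of 16 and 4 mean that
 * alpha_perf_event_set_period() below never programs PMC0/PMC1 with fewer
 * than 16/4 events to go before the next overflow.
 */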
/*
 * Helper routines to ensure that we read/write only the correct PMC bits
 * when calling the wrperfmon PALcall.
 */
static inline void alpha_write_pmc(int idx, unsigned long val)
{
	val &= alpha_pmu->pmc_count_mask[idx];
	val <<= alpha_pmu->pmc_count_shift[idx];
	val |= (1<<idx);
	wrperfmon(PERFMON_CMD_WRITE, val);
}

static inline unsigned long alpha_read_pmc(int idx)
{
	unsigned long val;

	val = wrperfmon(PERFMON_CMD_READ, 0);
	val >>= alpha_pmu->pmc_count_shift[idx];
	val &= alpha_pmu->pmc_count_mask[idx];
	return val;
}
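/*
 * For example, reading PMC1 on the EV67 amounts to
 *
 *	pcr = wrperfmon(PERFMON_CMD_READ, 0);
 *	val = (pcr >> EV67_PCTR_1_COUNT_SHIFT) & EV67_PCTR_1_COUNT_MASK;
 *
 * since both counters are packed into the single PCTR value returned by the
 * PALcall; perf_event_print_debug() below does the same by hand.
 */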
/* Set a new period to sample over. */
static int alpha_perf_event_set_period(struct perf_event *event,
				struct hw_perf_event *hwc, int idx)
{
	long left = atomic64_read(&hwc->period_left);
	long period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Hardware restrictions require that the counters must not be
	 * written with values that are too close to the maximum period.
	 */
	if (unlikely(left < alpha_pmu->pmc_left[idx]))
		left = alpha_pmu->pmc_left[idx];

	if (left > (long)alpha_pmu->pmc_max_period[idx])
		left = alpha_pmu->pmc_max_period[idx];

	atomic64_set(&hwc->prev_count, (unsigned long)(-left));

	alpha_write_pmc(idx, (unsigned long)(-left));

	perf_event_update_userpage(event);

	return ret;
}
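/*
 * Worked example: for a sample_period of 10000 the PMC is written with
 * (unsigned long)(-10000), which alpha_write_pmc() masks down to the 20-bit
 * counter field as 0x100000 - 10000 = 0xFD8F0, so the counter overflows and
 * raises a PMI once the requested 10000 events have been counted.
 */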
/*
 * Calculates the count (the 'delta') since the last time the PMC was read.
 *
 * As the PMCs' full period can easily be exceeded within the perf system
 * sampling period, we cannot use any high order bits as a guard bit in the
 * PMCs to detect overflow as is done by other architectures.  The code here
 * calculates the delta on the basis that there is no overflow when ovf is
 * zero.  The value passed via ovf by the interrupt handler corrects for
 * overflow.
 *
 * This can be racy on rare occasions -- a call to this routine can occur
 * with an overflowed counter just before the PMI service routine is called.
 * The check for delta negative hopefully always rectifies this situation.
 */
static unsigned long alpha_perf_event_update(struct perf_event *event,
					struct hw_perf_event *hwc, int idx, long ovf)
{
	long prev_raw_count, new_raw_count;
	long delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = alpha_read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

	/* It is possible on very rare occasions that the PMC has overflowed
	 * but the interrupt is yet to come.  Detect and fix this situation.
	 */
	if (unlikely(delta < 0)) {
		delta += alpha_pmu->pmc_max_period[idx] + 1;
	}

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
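/*
 * Worked example: continuing the 10000-event period above, suppose the
 * counter is read back as 0x10 after wrapping, with ovf == 0.  Then
 *
 *	delta = 0x10 - 0xFD8F0          (negative, so a wrap was missed)
 *	delta += pmc_max_period + 1     (+0x100000, giving 0x2720 = 10016)
 *
 * i.e. the full 10000-event period plus the 16 events counted since the wrap.
 */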
/*
 * Collect all HW events into the array event[].
 */
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *event[], unsigned long *evtype,
			  int *current_idx)
{
	struct perf_event *pe;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		event[n] = group;
		evtype[n] = group->hw.event_base;
		current_idx[n++] = PMC_NO_INDEX;
	}
	list_for_each_entry(pe, &group->sibling_list, group_entry) {
		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			event[n] = pe;
			evtype[n] = pe->hw.event_base;
			current_idx[n++] = PMC_NO_INDEX;
		}
	}
	return n;
}
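/*
 * For example, a group consisting of a hardware leader, one software sibling
 * and one hardware sibling yields n == 2: the software event is skipped and
 * only the two hardware events land in event[]/evtype[], each starting out
 * with a current_idx of PMC_NO_INDEX.
 */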
/*
 * Check that a group of events can be simultaneously scheduled on to the PMU.
 */
static int alpha_check_constraints(struct perf_event **events,
				   unsigned long *evtypes, int n_ev)
{
	/* A group with no HW events is possible from hw_perf_group_sched_in(). */
	if (n_ev == 0)
		return 0;

	if (n_ev > alpha_pmu->num_pmcs)
		return -1;

	return alpha_pmu->check_constraints(events, evtypes, n_ev);
}
/*
 * If new events have been scheduled then update cpuc with the new
 * configuration.  This may involve shifting cycle counts from one PMC to
 * another.
 */
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
	int j;

	if (cpuc->n_added == 0)
		return;

	/* Find counters that are moving to another PMC and update. */
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];

		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
			cpuc->current_idx[j] != pe->hw.idx) {
			alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
			cpuc->current_idx[j] = PMC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	cpuc->idx_mask = 0;
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];
		struct hw_perf_event *hwc = &pe->hw;
		int idx = hwc->idx;

		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
			continue;
		}

		alpha_perf_event_set_period(pe, hwc, idx);
		cpuc->current_idx[j] = idx;
		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
	}
	cpuc->config = cpuc->event[0]->hw.config_base;
}
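/*
 * A possible scenario, for illustration: a lone EV67_CYCLES event initially
 * sits on PMC1 (see ev67_mapping).  If an EV67_MBOXREPLAY event is then
 * scheduled alongside it, the replay traps must have PMC1, so the cycle count
 * accumulated so far is absorbed by alpha_perf_event_update() in the first
 * loop above and the cycles event is re-assigned to PMC0 in the second.
 */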
/* Schedule perf HW event on to PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static int alpha_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0;
	int ret;
	unsigned long flags;

	/*
	 * The Sparc code has the IRQ disable first followed by the perf
	 * disable, however this can lead to an overflowed counter with the
	 * PMI disabled on rare occasions.  The alpha_perf_event_update()
	 * routine should detect this situation by noting a negative delta,
	 * nevertheless we disable the PMCs first to enable a potential
	 * final PMI to occur before we disable interrupts.
	 */
	perf_disable();
	local_irq_save(flags);

	/* Default to error to be returned. */
	ret = -EAGAIN;

	/* Insert event on to PMU and if successful modify ret to valid return. */
	n0 = cpuc->n_events;
	if (n0 < alpha_pmu->num_pmcs) {
		cpuc->event[n0] = event;
		cpuc->evtype[n0] = event->hw.event_base;
		cpuc->current_idx[n0] = PMC_NO_INDEX;

		if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
			cpuc->n_events++;
			cpuc->n_added++;
			ret = 0;
		}
	}

	local_irq_restore(flags);
	perf_enable();

	return ret;
}
/* Disable performance monitoring unit.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static void alpha_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;
	int j;

	perf_disable();
	local_irq_save(flags);

	for (j = 0; j < cpuc->n_events; j++) {
		if (event == cpuc->event[j]) {
			int idx = cpuc->current_idx[j];

			/* Shift remaining entries down into the existing
			 * slot.
			 */
			while (++j < cpuc->n_events) {
				cpuc->event[j - 1] = cpuc->event[j];
				cpuc->evtype[j - 1] = cpuc->evtype[j];
				cpuc->current_idx[j - 1] =
					cpuc->current_idx[j];
			}

			/* Absorb the final count and turn off the event. */
			alpha_perf_event_update(event, hwc, idx, 0);
			perf_event_update_userpage(event);

			cpuc->idx_mask &= ~(1UL<<idx);
			cpuc->n_events--;
			break;
		}
	}

	local_irq_restore(flags);
	perf_enable();
}
static void alpha_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	alpha_perf_event_update(event, hwc, hwc->idx, 0);
}
static void alpha_pmu_unthrottle(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->idx_mask |= 1UL<<hwc->idx;
	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}
/*
 * Check that CPU performance counters are supported.
 * - currently support EV67 and later CPUs.
 * - actually some later revisions of the EV6 have the same PMC model as the
 *   EV67 but we don't do sufficiently deep CPU detection to detect them.
 *   Bad luck to the very few people who might have one, I guess.
 */
static int supported_cpu(void)
{
	struct percpu_struct *cpu;
	unsigned long cputype;

	/* Get cpu type from HW. */
	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	cputype = cpu->type & 0xffffffff;
	/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
	return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}
static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Nothing to be done! */
}
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *evts[MAX_HWEVENTS];
	unsigned long evtypes[MAX_HWEVENTS];
	int idx_rubbish_bin[MAX_HWEVENTS];
	int ev;
	int n;

	/* We only support a limited range of HARDWARE event types, with one
	 * only programmable via a RAW event type.
	 */
	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= alpha_pmu->max_events)
			return -EINVAL;
		ev = alpha_pmu->event_map[attr->config];
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		return -EOPNOTSUPP;
	} else if (attr->type == PERF_TYPE_RAW) {
		ev = attr->config & 0xff;
	} else {
		return -EOPNOTSUPP;
	}

	if (ev < 0) {
		return ev;
	}

	/* The EV67 does not support mode exclusion. */
	if (attr->exclude_kernel || attr->exclude_user
			|| attr->exclude_hv || attr->exclude_idle) {
		return -EPERM;
	}

	/*
	 * We place the event type in event_base here and leave calculation
	 * of the codes to programme the PMU for alpha_pmu_enable() because
	 * it is only then that we will know what HW events are actually
	 * scheduled on to the PMU.  At that point the code to programme the
	 * PMU is put into config_base and the PMC to use is placed into
	 * idx.  We initialise idx (below) to PMC_NO_INDEX to indicate that
	 * it is yet to be determined.
	 */
	hwc->event_base = ev;

	/* Collect events in a group together suitable for calling
	 * alpha_check_constraints() to verify that the group as a whole can
	 * be scheduled on to the PMU.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				alpha_pmu->num_pmcs - 1,
				evts, evtypes, idx_rubbish_bin);
		if (n < 0)
			return -EINVAL;
	}
	evtypes[n] = hwc->event_base;
	evts[n] = event;

	if (alpha_check_constraints(evts, evtypes, n + 1))
		return -EINVAL;

	/* Indicate that PMU config and idx are yet to be determined. */
	hwc->config_base = 0;
	hwc->idx = PMC_NO_INDEX;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Most architectures reserve the PMU for their use at this point.
	 * As there is no existing mechanism to arbitrate usage and there
	 * appears to be no other user of the Alpha PMU we just assume
	 * that we can use it, hence a NO-OP here.
	 *
	 * Maybe an alpha_reserve_pmu() routine should be implemented but is
	 * anything else ever going to use it?
	 */

	if (!hwc->sample_period) {
		hwc->sample_period = alpha_pmu->pmc_max_period[0];
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}
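/*
 * For instance, a user asking for PERF_TYPE_HARDWARE with
 * PERF_COUNT_HW_CPU_CYCLES ends up with hwc->event_base == EV67_CYCLES here,
 * while a PERF_TYPE_RAW request only keeps the low byte of attr->config; in
 * both cases the actual PMC and wrperfmon() code are chosen later, at
 * alpha_pmu_enable() time.
 */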
static const struct pmu pmu = {
	.enable = alpha_pmu_enable,
	.disable = alpha_pmu_disable,
	.read = alpha_pmu_read,
	.unthrottle = alpha_pmu_unthrottle,
};
/*
 * Main entry point to initialise a HW performance event.
 */
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err;

	if (!alpha_pmu)
		return ERR_PTR(-ENODEV);

	/* Do the real initialisation work. */
	err = __hw_perf_event_init(event);

	if (err)
		return ERR_PTR(err);

	return &pmu;
}
/*
 * Main entry point - enable HW performance counters.
 */
void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events > 0) {
		/* Update cpuc with information from any new scheduled events. */
		maybe_change_configuration(cpuc);

		/* Start counting the desired events. */
		wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
		wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
	}
}
/*
 * Main entry point - disable HW performance counters.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}
/*
 * Main entry point - don't know when this is called but it
 * obviously dumps debug info.
 */
void perf_event_print_debug(void)
{
	unsigned long flags;
	unsigned long pcr;
	int pcr0, pcr1;
	int cpu;

	if (!supported_cpu())
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = wrperfmon(PERFMON_CMD_READ, 0);
	pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
	pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];

	pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);

	local_irq_restore(flags);
}
/*
 * Performance Monitoring Interrupt Service Routine called when a PMC
 * overflows.  The PMC that overflowed is passed in la_ptr.
 */
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
					struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc;
	struct perf_sample_data data;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, j;

	__get_cpu_var(irq_pmi_count)++;
	cpuc = &__get_cpu_var(cpu_hw_events);

	/* Completely counting through the PMC's period to trigger a new PMC
	 * overflow interrupt while in this interrupt routine is utterly
	 * disastrous!  The EV6 and EV67 counters are sufficiently large to
	 * prevent this but to be really sure disable the PMCs.
	 */
	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

	/* la_ptr is the counter that overflowed. */
	if (unlikely(la_ptr >= perf_max_events)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: silly index %ld\n", la_ptr);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	idx = la_ptr;

	perf_sample_data_init(&data, 0);
	for (j = 0; j < cpuc->n_events; j++) {
		if (cpuc->current_idx[j] == idx)
			break;
	}

	if (unlikely(j == cpuc->n_events)) {
		/* This can occur if the event is disabled right on a PMC overflow. */
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	event = cpuc->event[j];

	if (unlikely(!event)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: No event at index %d!\n", idx);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	hwc = &event->hw;
	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
	data.period = event->hw.last_period;

	if (alpha_perf_event_set_period(event, hwc, idx)) {
		if (perf_event_overflow(event, 1, &data, regs)) {
			/* Interrupts coming too quickly; "throttle" the
			 * counter, i.e., disable it for a little while.
			 */
			cpuc->idx_mask &= ~(1UL<<idx);
		}
	}
	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
}
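/*
 * A note on the update above: the handler passes pmc_max_period + 1 as the
 * "ovf" correction because the counter has wrapped exactly once by the time
 * the PMI is taken; this is what lets alpha_perf_event_update() reconstruct
 * the true delta despite the lack of a guard bit in the PMCs.
 */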
/*
 * Init call to initialise performance events at kernel startup.
 */
void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_cpu()) {
		pr_cont("No support for your CPU.\n");
		return;
	}

	pr_cont("Supported CPU type!\n");

	/* Override performance counter IRQ vector. */
	perf_irq = alpha_perf_event_irq_handler;

	/* And set up PMU specification. */
	alpha_pmu = &ev67_pmu;
	perf_max_events = alpha_pmu->num_pmcs;
}