/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
struct cpu_hw_counters {
        int n_counters;
        int n_percpu;
        int disabled;
        int n_added;
        struct perf_counter *counter[MAX_HWCOUNTERS];
        unsigned int events[MAX_HWCOUNTERS];
        u64 mmcr[3];
        u8 pmcs_enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;
/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

void perf_counter_print_debug(void)
{
}
/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
        unsigned long val;

        switch (idx) {
        case 1: val = mfspr(SPRN_PMC1); break;
        case 2: val = mfspr(SPRN_PMC2); break;
        case 3: val = mfspr(SPRN_PMC3); break;
        case 4: val = mfspr(SPRN_PMC4); break;
        case 5: val = mfspr(SPRN_PMC5); break;
        case 6: val = mfspr(SPRN_PMC6); break;
        case 7: val = mfspr(SPRN_PMC7); break;
        case 8: val = mfspr(SPRN_PMC8); break;
        default:
                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
                val = 0;
        }
        return val;
}
/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
        switch (idx) {
        case 1: mtspr(SPRN_PMC1, val); break;
        case 2: mtspr(SPRN_PMC2, val); break;
        case 3: mtspr(SPRN_PMC3, val); break;
        case 4: mtspr(SPRN_PMC4, val); break;
        case 5: mtspr(SPRN_PMC5, val); break;
        case 6: mtspr(SPRN_PMC6, val); break;
        case 7: mtspr(SPRN_PMC7, val); break;
        case 8: mtspr(SPRN_PMC8, val); break;
        default:
                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
        }
}
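/*
 * Note: counter->hw.idx is 1-based in this file (0 means the counter is
 * not currently on a PMC), while hwc_index[] and the ppmu callbacks use
 * 0-based PMC numbers, hence the "+ 1" / "- 1" conversions below.
 */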
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(unsigned int event[], int n_ev)
{
        u64 mask, value, nv;
        unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
        int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
        int i, j;
        u64 addf = ppmu->add_fields;
        u64 tadd = ppmu->test_adder;
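        /*
         * Each event's constraint is a (mask, value) pair supplied by the
         * back-end's get_constraint().  Roughly speaking, value contains
         * bit-fields describing the PMU resources the event needs, and a
         * compatible set of events can be accumulated by summing those
         * fields; add_fields gives the per-field increments and test_adder
         * is chosen so that an over-committed field carries into a bit
         * covered by mask, which the checks below detect.  (The exact
         * encoding is defined by each CPU's struct power_pmu back-end.)
         */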
        if (n_ev > ppmu->n_counter)
                return -1;

        /* First see if the events will go on as-is */
        for (i = 0; i < n_ev; ++i) {
                alternatives[i][0] = event[i];
                if (ppmu->get_constraint(event[i], &amasks[i][0],
                                         &avalues[i][0]))
                        return -1;
                choice[i] = 0;
        }
        value = mask = 0;
        for (i = 0; i < n_ev; ++i) {
                nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
                if ((((nv + tadd) ^ value) & mask) != 0 ||
                    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
                        break;
                value = nv;
                mask |= amasks[i][0];
        }
        if (i == n_ev)
                return 0;       /* all OK */
        /* doesn't work, gather alternatives... */
        if (!ppmu->get_alternatives)
                return -1;
        for (i = 0; i < n_ev; ++i) {
                n_alt[i] = ppmu->get_alternatives(event[i], alternatives[i]);
                for (j = 1; j < n_alt[i]; ++j)
                        ppmu->get_constraint(alternatives[i][j],
                                             &amasks[i][j], &avalues[i][j]);
        }

        /* enumerate all possibilities and see if any will work */
        i = 0;
        j = -1;
        value = mask = nv = 0;
        while (1) {
                if (j >= 0) {
                        /* we're backtracking, restore context */
                        value = svalues[i];
                        mask = smasks[i];
                        j = choice[i];
                }
                /*
                 * See if any alternative k for event i,
                 * where k > j, will satisfy the constraints.
                 */
                while (++j < n_alt[i]) {
                        nv = (value | avalues[i][j]) +
                                (value & avalues[i][j] & addf);
                        if ((((nv + tadd) ^ value) & mask) == 0 &&
                            (((nv + tadd) ^ avalues[i][j])
                             & amasks[i][j]) == 0)
                                break;
                }
                if (j >= n_alt[i]) {
                        /*
                         * No feasible alternative, backtrack
                         * to event i-1 and continue enumerating its
                         * alternatives from where we got up to.
                         */
                        if (--i < 0)
                                return -1;
                } else {
                        /*
                         * Found a feasible alternative for event i,
                         * remember where we got up to with this event,
                         * go on to the next event, and start with
                         * the first alternative for it.
                         */
                        choice[i] = j;
                        svalues[i] = value;
                        smasks[i] = mask;
                        value = nv;
                        mask |= amasks[i][j];
                        if (++i >= n_ev)
                                break;
                        j = -1;
                }
        }

        /* OK, we have a feasible combination, tell the caller the solution */
        for (i = 0; i < n_ev; ++i)
                event[i] = alternatives[i][choice[i]];
        return 0;
}
/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new)
{
        int eu, ek, eh;
        int n, i;
        struct perf_counter *counter;

        n = n_prev + n_new;
        if (n <= 1)
                return 0;

        eu = ctrs[0]->hw_event.exclude_user;
        ek = ctrs[0]->hw_event.exclude_kernel;
        eh = ctrs[0]->hw_event.exclude_hv;
        if (n_prev == 0)
                n_prev = 1;
        for (i = n_prev; i < n; ++i) {
                counter = ctrs[i];
                if (counter->hw_event.exclude_user != eu ||
                    counter->hw_event.exclude_kernel != ek ||
                    counter->hw_event.exclude_hv != eh)
                        return -EAGAIN;
        }
        return 0;
}
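/*
 * Read the hardware counter for "counter" and fold the change into
 * counter->count.  The PMCs are only 32 bits wide, so the delta since
 * the last read is taken modulo 2^32; prev_count is updated with
 * cmpxchg because the PMU interrupt (which behaves like an NMI here)
 * can update it concurrently.
 */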
static void power_perf_read(struct perf_counter *counter)
{
        long val, delta, prev;

        if (!counter->hw.idx)
                return;
        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
         * Therefore we treat them like NMIs.
         */
        do {
                prev = atomic64_read(&counter->hw.prev_count);
                barrier();
                val = read_pmc(counter->hw.idx);
        } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &counter->hw.period_left);
}
/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
u64 hw_perf_save_disable(void)
{
        struct cpu_hw_counters *cpuhw;
        unsigned long ret;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_counters);

        ret = cpuhw->disabled;
        if (!ret) {
                cpuhw->disabled = 1;
                cpuhw->n_added = 0;

                /*
                 * Check if we ever enabled the PMU on this cpu.
                 */
                if (!cpuhw->pmcs_enabled) {
                        if (ppc_md.enable_pmcs)
                                ppc_md.enable_pmcs();
                        cpuhw->pmcs_enabled = 1;
                }

                /*
                 * Set the 'freeze counters' bit.
                 * The barrier is to make sure the mtspr has been
                 * executed and the PMU has frozen the counters
                 * before we return.
                 */
                mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
                mb();
        }
        local_irq_restore(flags);
        return ret;
}
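/*
 * hw_perf_save_disable() returns the previous "disabled" state and
 * hw_perf_restore() does nothing unless it is passed 0, so nested
 * disable/restore pairs only touch the hardware at the outermost level.
 */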
/*
 * Re-enable all counters if disable == 0.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_restore(u64 disable)
{
        struct perf_counter *counter;
        struct cpu_hw_counters *cpuhw;
        unsigned long flags;
        long i;
        unsigned long val;
        s64 left;
        unsigned int hwc_index[MAX_HWCOUNTERS];

        if (disable)
                return;
        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_counters);
        cpuhw->disabled = 0;
        /*
         * If we didn't change anything, or only removed counters,
         * no need to recalculate MMCR* settings and reset the PMCs.
         * Just reenable the PMU with the current MMCR* settings
         * (possibly updated for removal of counters).
         */
        if (!cpuhw->n_added) {
                mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
                mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
                mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
                if (cpuhw->n_counters == 0)
                        get_lppaca()->pmcregs_in_use = 0;
                goto out;
        }
        /*
         * Compute MMCR* values for the new set of counters
         */
        if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
                               cpuhw->mmcr)) {
                /* shouldn't ever get here */
                printk(KERN_ERR "oops compute_mmcr failed\n");
                goto out;
        }
        /*
         * Add in MMCR0 freeze bits corresponding to the
         * hw_event.exclude_* bits for the first counter.
         * We have already checked that all counters have the
         * same values for these bits as the first counter.
         */
        counter = cpuhw->counter[0];
        if (counter->hw_event.exclude_user)
                cpuhw->mmcr[0] |= MMCR0_FCP;
        if (counter->hw_event.exclude_kernel)
                cpuhw->mmcr[0] |= freeze_counters_kernel;
        if (counter->hw_event.exclude_hv)
                cpuhw->mmcr[0] |= MMCR0_FCHV;
        /*
         * Write the new configuration to MMCR* with the freeze
         * bit set and set the hardware counters to their initial values.
         * Then unfreeze the counters.
         */
        get_lppaca()->pmcregs_in_use = 1;
        mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
        mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
        mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
                                | MMCR0_FC);
        /*
         * Read off any pre-existing counters that need to move
         * to another PMC.
         */
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
                        power_perf_read(counter);
                        write_pmc(counter->hw.idx, 0);
                        counter->hw.idx = 0;
                }
        }
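        /*
         * Note on the initial PMC values set up below: a PMC raises its
         * interrupt when it goes negative (bit 31 becomes set), so a
         * counter that should overflow after "left" more events is primed
         * with 0x80000000 - left.
         */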
        /*
         * Initialize the PMCs for all the new and moved counters.
         */
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter->hw.idx)
                        continue;
                val = 0;
                if (counter->hw_event.irq_period) {
                        left = atomic64_read(&counter->hw.period_left);
                        if (left < 0x80000000L)
                                val = 0x80000000L - left;
                }
                atomic64_set(&counter->hw.prev_count, val);
                counter->hw.idx = hwc_index[i] + 1;
                write_pmc(counter->hw.idx, val);
        }
        cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
        mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);

 out:
        local_irq_restore(flags);
}
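/*
 * Collect the hardware counters in a group: the leader plus any siblings
 * that are hardware counters and not OFF.  Fills ctrs[] and events[] and
 * returns the number collected, or -1 if there are more than max_count.
 */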
static int collect_events(struct perf_counter *group, int max_count,
                          struct perf_counter *ctrs[], unsigned int *events)
{
        int n = 0;
        struct perf_counter *counter;

        if (!is_software_counter(group)) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
                events[n++] = group->hw.config;
        }
        list_for_each_entry(counter, &group->sibling_list, list_entry) {
                if (!is_software_counter(counter) &&
                    counter->state != PERF_COUNTER_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        ctrs[n] = counter;
                        events[n++] = counter->hw.config;
                }
        }
        return n;
}
static void counter_sched_in(struct perf_counter *counter, int cpu)
{
        counter->state = PERF_COUNTER_STATE_ACTIVE;
        counter->oncpu = cpu;
        if (is_software_counter(counter))
                counter->hw_ops->enable(counter);
}
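/*
 * Note that counter_sched_in() only calls the enable method for software
 * counters; hardware counters are put on the PMU collectively when the
 * caller's hw_perf_restore() reprograms it.
 */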
/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_cpu_context *cpuctx,
               struct perf_counter_context *ctx, int cpu)
{
        struct cpu_hw_counters *cpuhw;
        long i, n, n0;
        struct perf_counter *sub;

        cpuhw = &__get_cpu_var(cpu_hw_counters);
        n0 = cpuhw->n_counters;
        n = collect_events(group_leader, ppmu->n_counter - n0,
                           &cpuhw->counter[n0], &cpuhw->events[n0]);
        if (n < 0)
                return -EAGAIN;
        if (check_excludes(cpuhw->counter, n0, n))
                return -EAGAIN;
        if (power_check_constraints(cpuhw->events, n + n0))
                return -EAGAIN;
        cpuhw->n_counters = n0 + n;
        cpuhw->n_added += n;

        /*
         * OK, this group can go on; update counter states etc.,
         * and enable any software counters
         */
        for (i = n0; i < n0 + n; ++i)
                cpuhw->counter[i]->hw.config = cpuhw->events[i];
        cpuctx->active_oncpu += n;
        n = 1;
        counter_sched_in(group_leader, cpu);
        list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
                if (sub->state != PERF_COUNTER_STATE_OFF) {
                        counter_sched_in(sub, cpu);
                        ++n;
                }
        }
        ctx->nr_active += n;

        return 1;
}
/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_restore to do the
 * actual work of reconfiguring the PMU.
 */
static int power_perf_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuhw;
        unsigned long flags;
        u64 pmudis;
        int n0;
        int ret = -EAGAIN;

        local_irq_save(flags);
        pmudis = hw_perf_save_disable();

        /*
         * Add the counter to the list (if there is room)
         * and check whether the total set is still feasible.
         */
        cpuhw = &__get_cpu_var(cpu_hw_counters);
        n0 = cpuhw->n_counters;
        if (n0 >= ppmu->n_counter)
                goto out;
        cpuhw->counter[n0] = counter;
        cpuhw->events[n0] = counter->hw.config;
        if (check_excludes(cpuhw->counter, n0, 1))
                goto out;
        if (power_check_constraints(cpuhw->events, n0 + 1))
                goto out;

        counter->hw.config = cpuhw->events[n0];
        ++cpuhw->n_counters;
        ++cpuhw->n_added;

        ret = 0;
 out:
        hw_perf_restore(pmudis);
        local_irq_restore(flags);
        return ret;
}
/*
 * Remove a counter from the PMU.
 */
static void power_perf_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuhw;
        long i;
        u64 pmudis;
        unsigned long flags;

        local_irq_save(flags);
        pmudis = hw_perf_save_disable();

        power_perf_read(counter);

        cpuhw = &__get_cpu_var(cpu_hw_counters);
        for (i = 0; i < cpuhw->n_counters; ++i) {
                if (counter == cpuhw->counter[i]) {
                        while (++i < cpuhw->n_counters)
                                cpuhw->counter[i-1] = cpuhw->counter[i];
                        --cpuhw->n_counters;
                        ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
                        write_pmc(counter->hw.idx, 0);
                        counter->hw.idx = 0;
                        break;
                }
        }
        if (cpuhw->n_counters == 0) {
                /* disable exceptions if no counters are running */
                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
        }

        hw_perf_restore(pmudis);
        local_irq_restore(flags);
}
struct hw_perf_counter_ops power_perf_ops = {
        .enable = power_perf_enable,
        .disable = power_perf_disable,
        .read = power_perf_read
};
const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        unsigned long ev;
        struct perf_counter *ctrs[MAX_HWCOUNTERS];
        unsigned int events[MAX_HWCOUNTERS];
        int n;

        if (!ppmu)
                return NULL;
        if ((s64)counter->hw_event.irq_period < 0)
                return NULL;
        if (!perf_event_raw(&counter->hw_event)) {
                ev = perf_event_id(&counter->hw_event);
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
                        return NULL;
                ev = ppmu->generic_events[ev];
        } else {
                ev = perf_event_config(&counter->hw_event);
        }
        counter->hw.config_base = ev;
        counter->hw.idx = 0;

        /*
         * If we are not running on a hypervisor, force the
         * exclude_hv bit to 0 so that we don't care what
         * the user set it to.
         */
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                counter->hw_event.exclude_hv = 0;

        /*
         * If this is in a group, check if it can go on with all the
         * other hardware counters in the group.  We assume the counter
         * hasn't been linked into its leader's sibling list at this point.
         */
        n = 0;
        if (counter->group_leader != counter) {
                n = collect_events(counter->group_leader, ppmu->n_counter - 1,
                                   ctrs, events);
                if (n < 0)
                        return NULL;
        }
        events[n] = ev;
        ctrs[n] = counter;
        if (check_excludes(ctrs, n, 1))
                return NULL;
        if (power_check_constraints(events, n + 1))
                return NULL;

        counter->hw.config = events[n];
        atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
        return &power_perf_ops;
}
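/*
 * Deferred wakeup handling: the PMU interrupt handler below cannot always
 * call wake_up() directly (interrupts may have been soft-disabled when it
 * ran), so it leaves counter->wakeup_pending set and this function does
 * the actual wakeups once it is safe.
 */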
void perf_counter_do_pending(void)
{
        int i;
        struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
        struct perf_counter *counter;

        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter && counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }
}
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, long val,
                               struct pt_regs *regs)
{
        s64 prev, delta, left;

        /* we don't have to worry about interrupts here */
        prev = atomic64_read(&counter->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
        atomic64_add(delta, &counter->count);

        /*
         * See if the total period for this counter has expired,
         * and update for the next period.
         */
        val = 0;
        left = atomic64_read(&counter->hw.period_left) - delta;
        if (counter->hw_event.irq_period) {
                if (left <= 0) {
                        left += counter->hw_event.irq_period;
                        if (left <= 0)
                                left = counter->hw_event.irq_period;
                }
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }
        write_pmc(counter->hw.idx, val);
        atomic64_set(&counter->hw.prev_count, val);
        atomic64_set(&counter->hw.period_left, left);

        /*
         * Finally record data if requested.
         */
        perf_counter_output(counter, 1, regs);
}
/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
        int i;
        struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
        struct perf_counter *counter;
        long val;
        int need_wakeup = 0, found = 0;

        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                val = read_pmc(counter->hw.idx);
                if ((int)val < 0) {
                        /* counter has overflowed */
                        found = 1;
                        record_and_restart(counter, val, regs);
                }
        }

        /*
         * In case we didn't find and reset the counter that caused
         * the interrupt, scan all counters and reset any that are
         * negative, to avoid getting continual interrupts.
         * Any that we processed in the previous loop will not be negative.
         */
        if (!found) {
                for (i = 0; i < ppmu->n_counter; ++i) {
                        val = read_pmc(i + 1);
                        if ((int)val < 0)
                                write_pmc(i + 1, 0);
                }
        }

        /*
         * Reset MMCR0 to its normal value.  This will set PMXE and
         * clear FC (freeze counters) and PMAO (perf mon alert occurred)
         * and thus allow interrupts to occur again.
         * XXX might want to use MSR.PM to keep the counters frozen until
         * we get back out of this interrupt.
         */
        mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);

        /*
         * If we need a wakeup, check whether interrupts were soft-enabled
         * when we took the interrupt.  If they were, we can wake stuff up
         * immediately; otherwise we'll have to do the wakeup when interrupts
         * get re-enabled.
         */
        if (get_perf_counter_pending() && regs->softe) {
                irq_enter();
                clear_perf_counter_pending();
                perf_counter_do_pending();
                irq_exit();
        }
}
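/*
 * Per-cpu setup: start with no counters and the PMU frozen (MMCR0_FC set)
 * until counters are added.
 */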
void hw_perf_counter_setup(int cpu)
{
        struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

        memset(cpuhw, 0, sizeof(*cpuhw));
        cpuhw->mmcr[0] = MMCR0_FC;
}
extern struct power_pmu power4_pmu;
extern struct power_pmu ppc970_pmu;
extern struct power_pmu power5_pmu;
extern struct power_pmu power5p_pmu;
extern struct power_pmu power6_pmu;
static int init_perf_counters(void)
{
        unsigned long pvr;

        if (reserve_pmc_hardware(perf_counter_interrupt)) {
                printk(KERN_ERR "Couldn't init performance monitor subsystem\n");
                return -EBUSY;
        }

        /* XXX should get this from cputable */
        pvr = mfspr(SPRN_PVR);
        switch (PVR_VER(pvr)) {
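        /*
         * The table of PVR cases is elided here.  Presumably each case
         * sets ppmu to one of the PMU descriptions declared above, along
         * these lines (the PV_* constants from <asm/reg.h> are an
         * assumption, not taken from this file):
         *
         *      case PV_970:
         *      case PV_970FX:
         *      case PV_970MP:
         *              ppmu = &ppc970_pmu;
         *              break;
         *      case PV_POWER5:
         *              ppmu = &power5_pmu;
         *              break;
         *      ...
         */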
        }

        /*
         * Use FCHV to ignore kernel events if MSR.HV is set.
         */
        if (mfmsr() & MSR_HV)
                freeze_counters_kernel = MMCR0_FCHV;

        return 0;
}

arch_initcall(init_perf_counters);