/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1
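/* Illustrative sketch (not part of the driver): the two 32-bit counters
 * described above are read together through one 64-bit %pic value, with
 * the upper counter in bits 63:32 and the lower counter in bits 31:0,
 * which is also why MAX_PERIOD is 2^32 - 1.  The helpers below are
 * hypothetical and exist only to show the layout.
 */
static inline u32 example_pic_upper(u64 pic)
{
	return pic >> 32;
}

static inline u32 example_pic_lower(u64 pic)
{
	return pic & 0xffffffff;
}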
struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu. */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register on this cpu. */
	u64			pcr;

	/* Enabled/disabled state. */
	int			enabled;

	unsigned int		group_flag;
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};
/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}
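/* Worked example (illustration only): a map entry with encoding 0x0009 and
 * pic_mask PIC_LOWER packs to (0x0009 << 16) | 0x02 == 0x00090002;
 * perf_event_get_enc() then recovers 0x0009 and perf_event_get_msk() 0x02.
 */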
#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};
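/* Illustrative lookup (hypothetical helper, not used by the driver): a
 * cache map is indexed as [type][op][result] with the C() shorthand above,
 * so the L1-D read-miss entry of a PMU description is reached like this.
 */
static inline const struct perf_event_map *
example_l1d_read_miss(const struct sparc_pmu *pmu)
{
	return &(*pmu->cache_map)[C(L1D)][C(OP_READ)][C(RESULT_MISS)];
}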
static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}
static const cache_map_t ultra3_cache_map = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}
static const cache_map_t niagara1_cache_map = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}
static const cache_map_t niagara2_cache_map = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}
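/* The two helpers below use the encodings above: sparc_pmu_enable_event()
 * programs an event into its counter's field of the software %pcr copy and
 * writes it to the hardware, while sparc_pmu_disable_event() parks the idle
 * counter on the NOP-style "sw_count" encoding from nop_for_index(), as
 * described in the comment at the top of this file.
 */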
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
	u64 val, mask = mask_for_index(idx);

	pcr_ops->write(cpuc->pcr);

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);

	pcr_ops->write(cpuc->pcr);

static u32 read_pmc(int idx)
	if (idx == PIC_UPPER_INDEX)

	return val & 0xffffffff;

static void write_pmc(int idx, u64 val)
	u64 shift, mask, pic;

	if (idx == PIC_UPPER_INDEX)

	mask = ((u64) 0xffffffff) << shift;

static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
	u64 prev_raw_count, new_raw_count;

	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)

	delta = (new_raw_count << shift) - (prev_raw_count << shift);

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;

static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;

	if (unlikely(left <= -period)) {
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;

	if (unlikely(left <= 0)) {
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;

	if (left > MAX_PERIOD)

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);
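/* Example of the arithmetic above (illustrative): with a sample_period of
 * 1000 and no leftover count, left == 1000 and write_pmc() is handed
 * (u64)(-1000) & 0xffffffff == 0xfffffc18, so the 32-bit counter wraps
 * from 0xffffffff to 0 (raising the overflow interrupt) after exactly
 * 1000 events.
 */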
/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)

	/* Read in the counters which are moving. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr |= event_encoding(enc, idx);
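/* hw_perf_enable() below runs when the perf core re-enables the PMU after
 * adding events inside a perf_{disable,enable}() sequence (see the comment
 * on n_added in struct cpu_hw_events); it folds newly added events into
 * the %pcr image via maybe_change_configuration() and writes it out.
 */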
void hw_perf_enable(void)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->n_events) {

	pcr = maybe_change_configuration(cpuc, pcr);

	/* We require that all of the events have the same
	 * configuration, so just fetch the settings from the
	 * first entry.
	 */
	cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;

	pcr_ops->write(cpuc->pcr);

void hw_perf_disable(void)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);

	pcr_ops->write(cpuc->pcr);

static void sparc_pmu_disable(struct perf_event *event)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	local_irq_save(flags);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			int idx = cpuc->current_idx[i];

			/* Shift remaining entries down into
			 * the dead slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_disable_event(cpuc, hwc, idx);

			sparc_perf_event_update(event, hwc, idx);

			perf_event_update_userpage(event);

	local_irq_restore(flags);
static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_pmu_enable_event(cpuc, hwc, idx);
}
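/* The performance counters are shared with the NMI watchdog: the first
 * active perf event stops the watchdog via perf_stop_nmi_watchdog() and
 * the release of the last event lets perf_event_release_pmc() restart it.
 */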
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read();
}

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}
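/* Worked example (illustrative): attr->config packs the cache event with
 * the type in bits 7:0, the op in bits 15:8 and the result in bits 23:16,
 * so a config of 0x010000 selects type L1D (0), op READ (0), result MISS
 * (1) and resolves to (*cache_map)[C(L1D)][C(OP_READ)][C(RESULT_MISS)].
 */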
static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}
/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
	u8 msk0 = 0, msk1 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */

	if (n_ev > perf_max_events)

	msk0 = perf_event_get_msk(events[0]);

	if (msk0 & PIC_LOWER)

	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK. */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)

	/* If the events are fixed to different counters, OK. */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)

	/* Otherwise, there is a conflict. */

	evts[0]->hw.idx = idx0;
		evts[1]->hw.idx = idx0 ^ 1;
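/* check_excludes() below verifies that all events being scheduled agree on
 * their exclude_user/exclude_kernel/exclude_hv settings; a single %pcr
 * supplies the privilege-level enable bits for both counters (see the
 * comment at the top of this file), so mixed exclusion settings cannot be
 * honored.
 */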
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;

	for (i = 0; i < n; i++) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
static int sparc_pmu_enable(struct perf_event *event)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;

	local_irq_save(flags);

	if (n0 >= perf_max_events)

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)

	if (check_excludes(cpuc->event, n0, 1))
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))

	local_irq_restore(flags);
static int __hw_perf_event_init(struct perf_event *event)
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;

	if (atomic_read(&nmi_active) < 0)

	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= sparc_pmu->max_events)
		pmap = sparc_pmu->event_map(attr->config);
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);

	/* We save the enable bits in the config_base. */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;
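	/* For illustration: an event that excludes nothing ends up with
	 * hwc->config_base == sparc_pmu->irq_bit | PCR_UTRACE | PCR_STRACE |
	 * sparc_pmu->hv_bit, i.e. user, supervisor and hypervisor counting
	 * enabled with overflow interrupts requested.
	 */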
	hwc->event_base = perf_event_encode(pmap);

	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   perf_max_events - 1,
				   evts, events, current_idx_dmy);

	events[n] = hwc->event_base;

	if (check_excludes(evts, n, 1))

	if (sparc_check_constraints(evts, events, n + 1))

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void sparc_pmu_start_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void sparc_pmu_cancel_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 on success
 */
static int sparc_pmu_commit_txn(const struct pmu *pmu)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc = &__get_cpu_var(cpu_hw_events);

	if (check_excludes(cpuc->event, 0, n))
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))

static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}
void perf_event_print_debug(void)
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();

	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",

	local_irq_restore(flags);
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit)
		pcr_ops->write(cpuc->pcr);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;

		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))

		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))

		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_disable_event(cpuc, hwc, idx);

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events. */
	perf_max_events = 2;

	register_die_notifier(&perf_event_nmi_notifier);
}
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static void perf_callchain_kernel(struct pt_regs *regs,
				  struct perf_callchain_entry *entry)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = current->curr_ret_stack;
			if (current->ret_stack && index >= graph) {
				pc = current->ret_stack[index - graph].ret;
				callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_64(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_32(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

/* Like powerpc we can't get PMU interrupts within the PMU handler,
 * so no need for separate NMI and IRQ chains as on x86.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);

	if (!user_mode(regs)) {
		stack_trace_flush();
		perf_callchain_kernel(regs, entry);
		regs = task_pt_regs(current);

	if (test_thread_flag(TIF_32BIT))
		perf_callchain_user_32(regs, entry);
	else
		perf_callchain_user_64(regs, entry);