/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
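
/*
 * For example, with MIPS_TCS_PER_COUNTER == 2 the mask is 0x1, so
 * (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK) maps even-numbered
 * CPUs to counter bank 0 and odd-numbered CPUs to bank 1 (see vpe_id()
 * below).
 */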

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in their performance counters, so code may use this
	 * differently, or not at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_PERFCTL_EVENT(event)	(((event) << MIPS_PERFCTRL_EVENT_S) & \
				 MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)	((vpe) << MIPS_PERFCTRL_VPEID_S)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)	0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)	(filter)
#endif /* CONFIG_CPU_BMIPS5000 */

#define	   M_TC_EN_ALL		M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
#define	   M_TC_EN_VPE		M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
#define	   M_TC_EN_TC		M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |	\
					 MIPS_PERFCTRL_K |	\
					 MIPS_PERFCTRL_U |	\
					 MIPS_PERFCTRL_S |	\
					 MIPS_PERFCTRL_IE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : smp_processor_id())
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
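
/*
 * Example: on a two-VPE MT core where the counters are shared rather
 * than per-TC, counters_total_to_per_cpu(4) returns 4 >> 1 == 2, i.e.
 * each logical CPU gets half of the physical counters.
 */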

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned; we must cast to truncate
		 * off the sign bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask; the range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others are counted
		 * only by even _or_ odd counters. This introduces an issue
		 * that when the former kind of event takes the counter the
		 * latter kind of event wants to use, then the "counter
		 * allocation" for the latter event will fail. In fact if
		 * they could be dynamically swapped, both would be happy.
		 * But here we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}
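
/*
 * Example of the mask logic above: an event restricted to even counters
 * has cntr_mask bits 0x5555, so on a 4-counter system the loop (which
 * scans from counter 3 down to 0) can only claim counter 2 or counter 0
 * for it.
 */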

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure interrupt enabled. */
		MIPS_PERFCTRL_IE;
	if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;

	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
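
/*
 * Worked example of the reload value written above: with 32-bit
 * counters (mipspmu.overflow == 1ULL << 31) and a sample_period of
 * 1000000, the counter is programmed to 0x80000000 - 1000000 so that
 * the overflow bit is set after exactly 1000000 events.
 */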

static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* To look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers
 * cannot be directly accessed across CPUs. Hence if we want to do
 * global control, we need cross CPU calls. on_each_cpu() can help us,
 * but we cannot make sure this function is called with interrupts
 * enabled. So here we pause local counters and then grab a rwlock and
 * leave the counters on other CPUs alone. If any counter interrupt is
 * raised while we hold the write lock, simply pause local counters on
 * that CPU and spin in the handler. Also we know we won't be switched
 * to another CPU after pausing local counters and before grabbing the
 * lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING |
				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				  IRQF_SHARED,
				  "mips_perf_pmu", &mipspmu);
		if (err) {
			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
				mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warn("The platform hasn't properly defined its interrupt controller\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, &mipspmu);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters; they
 * have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
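
/*
 * Worked example of the encoding above (MT case): a pev with range P
 * (2), cntr_mask CNTR_EVEN | CNTR_ODD and event_id 0x01 encodes to
 * (2 << 24) | 0xffff00 | 0x01 == 0x02ffff01. mipsxx_pmu_alloc_counter()
 * later recovers the counter mask with (event_base >> 8) & 0xffff.
 */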

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}

static int __n_counters(void)
{
	if (!cpu_has_perf)
		return 0;
	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
		return 1;
	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
		return 2;
	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	/* Each case falls through to reset the lower-numbered counters. */
	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K/proAptiv core has different branch event code. */
static const struct mips_perf_event mipsxxcore_event_map2
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event i6400_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
	/* These only count dcache, not icache */
	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
};

static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
};

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

/* 74K/proAptiv core has completely different cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map2
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
},
/*
 * 74K core does not have specific DTLB events. proAptiv core has
 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
 * not included here. One can use raw events if really needed.
 */
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event i6400_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
	},
},
[C(DTLB)] = {
	/* Can't distinguish read & write */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Conditional branches / mispredicted */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
	},
},
};

static const struct mips_perf_event loongson3_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_EVEN },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_EVEN },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x0c, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x0c, CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD },
	},
},
};

/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
},
};

static const struct mips_perf_event xlp_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = MIPS_PERFCTRL_IE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= MIPS_PERFCTRL_U;
	if (!attr->exclude_kernel) {
		hwc->config_base |= MIPS_PERFCTRL_K;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= MIPS_PERFCTRL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= MIPS_PERFCTRL_S;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period	= mipspmu.max_period;
		hwc->last_period	= hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

	return handled;
}
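
/*
 * For reference, HANDLE_COUNTER(3) above expands to "case 4:" followed
 * by the overflow check for counter 3; with counters == 4 the switch
 * enters at case 4 and falls through HANDLE_COUNTER(2)..(0), so every
 * implemented counter is polled for a pending overflow.
 */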

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* proAptiv */
#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1)
/* P5600 */
#define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/* interAptiv */
#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
#endif

/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)

/*
 * For most cores the user can use raw events 0-255, where 0-127 are for
 * the events of even counters and 128-255 for odd counters. Note that
 * bit 7 is used as the even/odd bank selector. So, for example, when the
 * user wants Event Num 15 on the odd counters (by referring to the user
 * manual), 128 needs to be added to 15 as the input for the event
 * config, i.e., 143 (0x8F) is to be used.
 *
 * Some newer cores have even more events, in which case the user can use
 * raw events 0-511, where 0-255 are for the events of even counters and
 * 256-511 for odd counters, so bit 8 is used as the even/odd bank
 * selector.
 */
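
/*
 * From userspace this maps onto the perf tool's raw event syntax; e.g.
 * requesting event 15 on the odd counter bank of a 7-bit-event core
 * would typically be done with "perf stat -e r8f ..." (0x8f == 143).
 */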
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	/* currently most cores have 7-bit event numbers */
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
	case CPU_1074K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_PROAPTIV:
		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_P5600:
	case CPU_P6600:
	case CPU_I6400:
		/* 8-bit event numbers */
		raw_id = config & 0x1ff;
		base_id = raw_id & 0xff;
		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_INTERAPTIV:
		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_BMIPS5000:
		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
	case CPU_LOONGSON3:
		raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
	}

	raw_event.event_id = base_id;

	return &raw_event;
}

static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3b ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;

	/* Only 1-63 are defined */
	if ((raw_id < 0x01) || (raw_id > 0x3f))
		return ERR_PTR(-EOPNOTSUPP);

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = raw_id;

	return &raw_event;
}

static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

	if (get_c0_perfcount_int)
		irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_PROAPTIV:
		mipspmu.name = "mips/proAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P5600:
		mipspmu.name = "mips/P5600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P6600:
		mipspmu.name = "mips/P6600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_I6400:
		mipspmu.name = "mips/I6400";
		mipspmu.general_event_map = &i6400_event_map;
		mipspmu.cache_event_map = &i6400_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_1074K:
		mipspmu.name = "mips/1074K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_INTERAPTIV:
		mipspmu.name = "mips/interAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON3:
		mipspmu.name = "mips/loongson3";
		mipspmu.general_event_map = &loongson3_event_map;
		mipspmu.cache_event_map = &loongson3_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	case CPU_BMIPS5000:
		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;
		break;
	case CPU_XLP:
		mipspmu.name = "xlp";
		mipspmu.general_event_map = &xlp_event_map;
		mipspmu.cache_event_map = &xlp_cache_map;
		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);