/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
static int
armpmu_map_cache_event(const unsigned (*cache_map)
                                      [PERF_COUNT_HW_CACHE_MAX]
                                      [PERF_COUNT_HW_CACHE_OP_MAX]
                                      [PERF_COUNT_HW_CACHE_RESULT_MAX],
                       u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
        int mapping;

        if (config >= PERF_COUNT_HW_MAX)
                return -EINVAL;

        mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
        return (int)(config & raw_event_mask);
}
int
armpmu_map_event(struct perf_event *event,
                 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
                 const unsigned (*cache_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX],
                 u32 raw_event_mask)
{
        u64 config = event->attr.config;
        int type = event->attr.type;

        if (type == event->pmu->type)
                return armpmu_map_raw_event(raw_event_mask, config);

        switch (type) {
        case PERF_TYPE_HARDWARE:
                return armpmu_map_hw_event(event_map, config);
        case PERF_TYPE_HW_CACHE:
                return armpmu_map_cache_event(cache_map, config);
        case PERF_TYPE_RAW:
                return armpmu_map_raw_event(raw_event_mask, config);
        }

        return -ENOENT;
}
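/*
 * Program the hardware counter for the next sample period: the counter is
 * written with the negated remaining period so that it overflows (and raises
 * an interrupt) once "left" more events have been counted. Returns non-zero
 * when a new sampling period was started.
 */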
int armpmu_event_set_period(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        /*
         * Limit the maximum period to prevent the counter value
         * from overtaking the one we are about to program. In
         * effect we are reducing max_period to account for
         * interrupt latency (and we are being very conservative).
         */
        if (left > (armpmu->max_period >> 1))
                left = armpmu->max_period >> 1;

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}
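/*
 * Read the counter and fold the difference since the last read into the
 * event's total count. The cmpxchg retry loop guards against a concurrent
 * update (e.g. from the overflow interrupt) racing with this read.
 */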
u64 armpmu_event_update(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
static void
armpmu_read(struct perf_event *event)
{
        armpmu_event_update(event);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(event);
                armpmu_event_update(event);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}
static void armpmu_start(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event);
        armpmu->enable(event);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
        clear_bit(idx, hw_events->used_mask);
        if (armpmu->clear_event_idx)
                armpmu->clear_event_idx(hw_events, event);

        perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        /* An event following a process won't be stopped earlier */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return -ENOENT;

        perf_pmu_disable(event->pmu);

        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(hw_events, event);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(event);
        hw_events->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}
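/*
 * Group validation: before accepting a new event, check against a fake set
 * of hw_events that the whole group it belongs to could be scheduled onto
 * the PMU's counters at the same time.
 */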
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
               struct perf_event *event)
{
        struct arm_pmu *armpmu;

        if (is_software_event(event))
                return 1;

        /*
         * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
         * core perf code won't check that the pmu->ctx == leader->ctx
         * until after pmu->event_init(event).
         */
        if (event->pmu != pmu)
                return 0;

        if (event->state < PERF_EVENT_STATE_OFF)
                return 1;

        if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;

        armpmu = to_arm_pmu(event->pmu);
        return armpmu->get_event_idx(hw_events, event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;

        /*
         * Initialise the fake PMU. We only need to populate the
         * used_mask for the purposes of validation.
         */
        memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }

        if (!validate_event(event->pmu, &fake_pmu, event))
                return -EINVAL;

        return 0;
}
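/*
 * Common PMU interrupt entry point: hand the interrupt to the platform's
 * demux handler if one was provided, otherwise call the PMU driver's handler
 * directly, and record how long servicing took so the perf core can throttle
 * the sampling rate if needed.
 */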
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
        struct arm_pmu *armpmu;
        struct platform_device *plat_device;
        struct arm_pmu_platdata *plat;
        int ret;
        u64 start_clock, finish_clock;

        /*
         * We request the IRQ with a (possibly percpu) struct arm_pmu **, but
         * the handlers expect a struct arm_pmu *. The percpu_irq framework will
         * do any necessary shifting, we just need to perform the first
         * dereference.
         */
        armpmu = *(void **)dev;
        plat_device = armpmu->plat_device;
        plat = dev_get_platdata(&plat_device->dev);

        start_clock = sched_clock();
        if (plat && plat->handle_irq)
                ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
        else
                ret = armpmu->handle_irq(irq, armpmu);
        finish_clock = sched_clock();

        perf_sample_event_took(finish_clock - start_clock);
        return ret;
}
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
        return attr->exclude_idle || attr->exclude_user ||
               attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int mapping;

        mapping = armpmu->map_event(event);
        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx = -1;
        hwc->config_base = 0;
        hwc->config = 0;
        hwc->event_base = 0;

        /*
         * Check whether we need to exclude the counter from certain modes.
         */
        if ((!armpmu->set_event_filter ||
             armpmu->set_event_filter(hwc, &event->attr)) &&
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support mode exclusion\n");
                return -EOPNOTSUPP;
        }

        /*
         * Store the event encoding into the config_base field.
         */
        hwc->config_base |= (unsigned long)mapping;

        if (!is_sampling_event(event)) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width. That way, the new counter value
                 * is far less likely to overtake the previous one unless
                 * you have some serious IRQ latency issues.
                 */
                hwc->sample_period = armpmu->max_period >> 1;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        if (event->group_leader != event) {
                if (validate_group(event) != 0)
                        return -EINVAL;
        }

        return 0;
}
static int armpmu_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

        /*
         * Reject CPU-affine events for CPUs that are of a different class to
         * that which this PMU handles. Process-following events (where
         * event->cpu == -1) can be migrated between CPUs, and thus we have to
         * reject them later (in armpmu_add) if they're scheduled on a
         * different class of CPU.
         */
        if (event->cpu != -1 &&
            !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
                return -ENOENT;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (armpmu->map_event(event) == -ENOENT)
                return -ENOENT;

        return __hw_perf_event_init(event);
}
static void armpmu_enable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        if (enabled)
                armpmu->start(armpmu);
}
static void armpmu_disable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        armpmu->stop(armpmu);
}
/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        unsigned int cpu = smp_processor_id();

        return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}
static ssize_t armpmu_cpumask_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);
static struct attribute *armpmu_common_attrs[] = {
        &dev_attr_cpus.attr,
        NULL,
};

static struct attribute_group armpmu_common_attr_group = {
        .attrs = armpmu_common_attrs,
};
static void armpmu_init(struct arm_pmu *armpmu)
{
        armpmu->pmu = (struct pmu) {
                .pmu_enable     = armpmu_enable,
                .pmu_disable    = armpmu_disable,
                .event_init     = armpmu_event_init,
                .add            = armpmu_add,
                .del            = armpmu_del,
                .start          = armpmu_start,
                .stop           = armpmu_stop,
                .read           = armpmu_read,
                .filter_match   = armpmu_filter_match,
                .attr_groups    = armpmu->attr_groups,
        };

        armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
                &armpmu_common_attr_group;
}
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;
/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
        if (!__oprofile_cpu_pmu)
                return NULL;

        return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);
int perf_num_counters(void)
{
        int max_events = 0;

        if (__oprofile_cpu_pmu != NULL)
                max_events = __oprofile_cpu_pmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
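/*
 * PMU IRQ management: a per-CPU interrupt (PPI) is requested/freed once for
 * all supported CPUs, whereas per-CPU SPIs are requested and freed one CPU at
 * a time and tracked in the active_irqs mask.
 */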
static void cpu_pmu_free_irqs(struct arm_pmu *cpu_pmu)
{
        int cpu;
        struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

        for_each_cpu(cpu, &cpu_pmu->supported_cpus) {
                int irq = per_cpu(hw_events->irq, cpu);

                if (!irq)
                        continue;

                if (irq_is_percpu(irq)) {
                        free_percpu_irq(irq, &hw_events->percpu_pmu);
                        break;
                }

                if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
                        continue;

                free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
        }
}
static int cpu_pmu_request_irqs(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
        int cpu, err;
        struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

        for_each_cpu(cpu, &cpu_pmu->supported_cpus) {
                int irq = per_cpu(hw_events->irq, cpu);

                if (!irq)
                        continue;

                if (irq_is_percpu(irq)) {
                        err = request_percpu_irq(irq, handler, "arm-pmu",
                                                 &hw_events->percpu_pmu);
                        if (err)
                                pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                       irq);
                        return err;
                }

                err = request_irq(irq, handler,
                                  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
                                  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                               irq);
                        return err;
                }

                cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
        }

        return 0;
}
static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

        return per_cpu(hw_events->irq, cpu);
}
/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
        int irq;

        if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
                return 0;
        if (pmu->reset)
                pmu->reset(pmu);

        irq = armpmu_get_cpu_irq(pmu, cpu);
        if (irq) {
                if (irq_is_percpu(irq)) {
                        enable_percpu_irq(irq, IRQ_TYPE_NONE);
                        return 0;
                }

                if (irq_force_affinity(irq, cpumask_of(cpu)) &&
                    num_possible_cpus() > 1) {
                        pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                irq, cpu);
                }
        }

        return 0;
}
static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
        int irq;

        if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
                return 0;

        irq = armpmu_get_cpu_irq(pmu, cpu);
        if (irq && irq_is_percpu(irq))
                disable_percpu_irq(irq);

        return 0;
}
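/*
 * CPU PM notifier support: active counters are stopped (and their values
 * folded in via armpmu_stop(PERF_EF_UPDATE)) before the CPU enters a
 * low-power state, and reprogrammed and restarted on the way back out.
 */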
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct perf_event *event;
        int idx;

        for (idx = 0; idx < armpmu->num_events; idx++) {
                /*
                 * If the counter is not used skip it, there is no
                 * need of stopping/restarting it.
                 */
                if (!test_bit(idx, hw_events->used_mask))
                        continue;

                event = hw_events->events[idx];

                switch (cmd) {
                case CPU_PM_ENTER:
                        /*
                         * Stop and update the counter
                         */
                        armpmu_stop(event, PERF_EF_UPDATE);
                        break;
                case CPU_PM_EXIT:
                case CPU_PM_ENTER_FAILED:
                        /*
                         * Restore and enable the counter.
                         *
                         * armpmu_start() indirectly calls
                         * perf_event_update_userpage(), which requires RCU
                         * read locking to be functional. Wrap the call in
                         * RCU_NONIDLE() so the RCU subsystem knows this CPU
                         * is not idle from an RCU perspective for the
                         * duration of the armpmu_start() call.
                         */
                        RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
                        break;
                default:
                        break;
                }
        }
}
static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
                             void *v)
{
        struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return NOTIFY_DONE;

        /*
         * Always reset the PMU registers on power-up even if
         * there are no events running.
         */
        if (cmd == CPU_PM_EXIT && armpmu->reset)
                armpmu->reset(armpmu);

        if (!enabled)
                return NOTIFY_OK;

        switch (cmd) {
        case CPU_PM_ENTER:
                armpmu->stop(armpmu);
                cpu_pm_pmu_setup(armpmu, cmd);
                break;
        case CPU_PM_EXIT:
                cpu_pm_pmu_setup(armpmu, cmd);
                /* fall through */
        case CPU_PM_ENTER_FAILED:
                armpmu->start(armpmu);
                break;
        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}
static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
        return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
        cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif
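/*
 * Bring a CPU PMU instance up: request its IRQs, hook it into the CPU
 * hotplug state machine, and register the CPU PM notifier. Failures unwind
 * in reverse order.
 */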
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
        int err;

        err = cpu_pmu_request_irqs(cpu_pmu, armpmu_dispatch_irq);
        if (err)
                goto out;

        err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
                                       &cpu_pmu->node);
        if (err)
                goto out;

        err = cpu_pm_pmu_register(cpu_pmu);
        if (err)
                goto out_unregister;

        /*
         * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
         * big.LITTLE). This is not an uncore PMU, and we have taken ctx
         * sharing into account (e.g. with our pmu::filter_match callback and
         * pmu::event_init group validation).
         */
        cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;

        return 0;

out_unregister:
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
                                            &cpu_pmu->node);
out:
        cpu_pmu_free_irqs(cpu_pmu);
        return err;
}
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
        cpu_pm_pmu_unregister(cpu_pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
                                            &cpu_pmu->node);
}
/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
                             const struct pmu_probe_info *info)
{
        int cpu = get_cpu();
        unsigned int cpuid = read_cpuid_id();
        int ret = -ENODEV;

        pr_info("probing PMU on CPU %d\n", cpu);

        for (; info->init != NULL; info++) {
                if ((cpuid & info->mask) != info->cpuid)
                        continue;
                ret = info->init(pmu);
                break;
        }

        put_cpu();
        return ret;
}
static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
{
        int cpu, ret;
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

        ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
        if (ret)
                return ret;

        for_each_cpu(cpu, &pmu->supported_cpus)
                per_cpu(hw_events->irq, cpu) = irq;

        return 0;
}
static bool pmu_has_irq_affinity(struct device_node *node)
{
        return !!of_find_property(node, "interrupt-affinity", NULL);
}
static int pmu_parse_irq_affinity(struct device_node *node, int i)
{
        struct device_node *dn;
        int cpu;

        /*
         * If we don't have an interrupt-affinity property, we guess irq
         * affinity matches our logical CPU order, as we used to assume.
         * This is fragile, so we'll warn in pmu_parse_irqs().
         */
        if (!pmu_has_irq_affinity(node))
                return i;

        dn = of_parse_phandle(node, "interrupt-affinity", i);
        if (!dn) {
                pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
                        i, node->name);
                return -EINVAL;
        }

        /* Now look up the logical CPU number */
        for_each_possible_cpu(cpu) {
                struct device_node *cpu_dn;

                cpu_dn = of_cpu_device_node_get(cpu);
                of_node_put(cpu_dn);

                if (dn == cpu_dn)
                        break;
        }

        if (cpu >= nr_cpu_ids)
                pr_warn("failed to find logical CPU for %s\n", dn->name);

        of_node_put(dn);

        return cpu;
}
static int pmu_parse_irqs(struct arm_pmu *pmu)
{
        int i = 0, irqs;
        struct platform_device *pdev = pmu->plat_device;
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

        irqs = platform_irq_count(pdev);
        if (irqs < 0) {
                pr_err("unable to count PMU IRQs\n");
                return irqs;
        }

        /*
         * In this case we have no idea which CPUs are covered by the PMU.
         * To match our prior behaviour, we assume all CPUs in this case.
         */
        if (irqs == 0) {
                pr_warn("no irqs for PMU, sampling events not supported\n");
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
                cpumask_setall(&pmu->supported_cpus);
                return 0;
        }

        if (irqs == 1) {
                int irq = platform_get_irq(pdev, 0);

                if (irq && irq_is_percpu(irq))
                        return pmu_parse_percpu_irq(pmu, irq);
        }

        if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
                pr_warn("no interrupt-affinity property for %s, guessing.\n",
                        of_node_full_name(pdev->dev.of_node));
        }

        /*
         * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
         * special platdata function that attempts to demux them.
         */
        if (dev_get_platdata(&pdev->dev))
                cpumask_setall(&pmu->supported_cpus);

        for (i = 0; i < irqs; i++) {
                int cpu, irq;

                irq = platform_get_irq(pdev, i);
                if (WARN_ON(irq <= 0))
                        continue;

                if (irq_is_percpu(irq)) {
                        pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
                        return -EINVAL;
                }

                cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
                if (cpu < 0)
                        return cpu;
                if (cpu >= nr_cpu_ids)
                        continue;

                if (per_cpu(hw_events->irq, cpu)) {
                        pr_warn("multiple PMU IRQs for the same CPU detected\n");
                        return -EINVAL;
                }

                per_cpu(hw_events->irq, cpu) = irq;
                cpumask_set_cpu(cpu, &pmu->supported_cpus);
        }

        return 0;
}
static struct arm_pmu *armpmu_alloc(void)
{
        struct arm_pmu *pmu;
        int cpu;

        pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
        if (!pmu) {
                pr_info("failed to allocate PMU device!\n");
                goto out;
        }

        pmu->hw_events = alloc_percpu(struct pmu_hw_events);
        if (!pmu->hw_events) {
                pr_info("failed to allocate per-cpu PMU data.\n");
                goto out_free_pmu;
        }

        for_each_possible_cpu(cpu) {
                struct pmu_hw_events *events;

                events = per_cpu_ptr(pmu->hw_events, cpu);
                raw_spin_lock_init(&events->pmu_lock);
                events->percpu_pmu = pmu;
        }

        armpmu_init(pmu);

        return pmu;

out_free_pmu:
        kfree(pmu);
out:
        return NULL;
}
static void armpmu_free(struct arm_pmu *pmu)
{
        free_percpu(pmu->hw_events);
        kfree(pmu);
}
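/*
 * Common probe path for platform CPU PMU drivers: allocate the arm_pmu,
 * parse its IRQs, run the DT match (falling back to CPUID-based probing),
 * then register the resulting PMU with the perf core.
 */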
int arm_pmu_device_probe(struct platform_device *pdev,
                         const struct of_device_id *of_table,
                         const struct pmu_probe_info *probe_table)
{
        const struct of_device_id *of_id;
        const int (*init_fn)(struct arm_pmu *);
        struct device_node *node = pdev->dev.of_node;
        struct arm_pmu *pmu;
        int ret = -ENODEV;

        pmu = armpmu_alloc();
        if (!pmu)
                return -ENOMEM;

        pmu->plat_device = pdev;

        ret = pmu_parse_irqs(pmu);
        if (ret)
                goto out_free;

        if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
                init_fn = of_id->data;

                pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
                                                           "secure-reg-access");

                /* arm64 systems boot only as non-secure */
                if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
                        pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
                        pmu->secure_access = false;
                }

                ret = init_fn(pmu);
        } else if (probe_table) {
                cpumask_setall(&pmu->supported_cpus);
                ret = probe_current_pmu(pmu, probe_table);
        }

        if (ret) {
                pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
                goto out_free;
        }

        ret = cpu_pmu_init(pmu);
        if (ret)
                goto out_free;

        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
        if (ret)
                goto out_destroy;

        if (!__oprofile_cpu_pmu)
                __oprofile_cpu_pmu = pmu;

        pr_info("enabled with %s PMU driver, %d counters available\n",
                pmu->name, pmu->num_events);

        return 0;

out_destroy:
        cpu_pmu_destroy(pmu);
out_free:
        pr_info("%s: failed to register PMU devices!\n",
                of_node_full_name(node));
        armpmu_free(pmu);
        return ret;
}
static int arm_pmu_hp_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
                                      "perf/arm/pmu:starting",
                                      arm_perf_starting_cpu,
                                      arm_perf_teardown_cpu);
        if (ret)
                pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
                       ret);

        return ret;
}
subsys_initcall(arm_pmu_hp_init);