2 * CCI cache coherent interconnect driver
4 * Copyright (C) 2013 ARM Ltd.
5 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
17 #include <linux/arm-cci.h>
19 #include <linux/interrupt.h>
20 #include <linux/module.h>
21 #include <linux/of_address.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_platform.h>
24 #include <linux/perf_event.h>
25 #include <linux/platform_device.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
29 #include <asm/cacheflush.h>
30 #include <asm/smp_plat.h>
32 static void __iomem *cci_ctrl_base;
33 static unsigned long cci_ctrl_phys;
35 #ifdef CONFIG_ARM_CCI400_PORT_CTRL
38 unsigned int nb_ace_lite;
41 static const struct cci_nb_ports cci400_ports = {
46 #define CCI400_PORTS_DATA (&cci400_ports)
48 #define CCI400_PORTS_DATA (NULL)
51 static const struct of_device_id arm_cci_matches[] = {
52 #ifdef CONFIG_ARM_CCI400_COMMON
53 {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
55 #ifdef CONFIG_ARM_CCI500_PMU
56 { .compatible = "arm,cci-500", },
61 #ifdef CONFIG_ARM_CCI_PMU
63 #define DRIVER_NAME "ARM-CCI"
64 #define DRIVER_NAME_PMU DRIVER_NAME " PMU"
66 #define CCI_PMCR 0x0100
67 #define CCI_PID2 0x0fe8
69 #define CCI_PMCR_CEN 0x00000001
70 #define CCI_PMCR_NCNT_MASK 0x0000f800
71 #define CCI_PMCR_NCNT_SHIFT 11
73 #define CCI_PID2_REV_MASK 0xf0
74 #define CCI_PID2_REV_SHIFT 4
76 #define CCI_PMU_EVT_SEL 0x000
77 #define CCI_PMU_CNTR 0x004
78 #define CCI_PMU_CNTR_CTRL 0x008
79 #define CCI_PMU_OVRFLW 0x00c
81 #define CCI_PMU_OVRFLW_FLAG 1
83 #define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size)
84 #define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model))
85 #define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)
86 #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1)
88 #define CCI_PMU_MAX_HW_CNTRS(model) \
89 ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
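/*
 * Illustration of the counter window layout implied by the macros above,
 * using values from the CCI-500 model later in this file (an example, not
 * a statement about every implementation): with cntr_size = SZ_64K,
 * counter idx 3 is accessed through the 64K window starting at
 * CCI_PMU_CNTR_BASE(model, 3) = 3 * 0x10000 = 0x30000 from cci_pmu->base,
 * and CCI_PMU_MAX_HW_CNTRS() = 0 fixed + 8 programmable = 8 such windows.
 */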
91 /* Types of interfaces that can generate events */
95 #ifdef CONFIG_ARM_CCI500_PMU
106 struct cci_pmu_hw_events {
107 struct perf_event **events;
108 unsigned long *used_mask;
109 raw_spinlock_t pmu_lock;
114 * struct cci_pmu_model:
115 * @fixed_hw_cntrs - Number of fixed event counters
116 * @num_hw_cntrs - Maximum number of programmable event counters
117 * @cntr_size - Size of an event counter mapping
119 struct cci_pmu_model {
124 struct attribute **format_attrs;
125 struct attribute **event_attrs;
126 struct event_range event_ranges[CCI_IF_MAX];
127 int (*validate_hw_event)(struct cci_pmu *, unsigned long);
128 int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
131 static struct cci_pmu_model cci_pmu_models[];
138 unsigned long active_irqs;
139 const struct cci_pmu_model *model;
140 struct cci_pmu_hw_events hw_events;
141 struct platform_device *plat_device;
143 atomic_t active_events;
144 struct mutex reserve_mutex;
145 struct notifier_block cpu_nb;
149 #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
152 #ifdef CONFIG_ARM_CCI400_PMU
156 #ifdef CONFIG_ARM_CCI500_PMU
162 static void pmu_write_counters(struct cci_pmu *cci_pmu,
163 unsigned long *mask);
164 static ssize_t cci_pmu_format_show(struct device *dev,
165 struct device_attribute *attr, char *buf);
166 static ssize_t cci_pmu_event_show(struct device *dev,
167 struct device_attribute *attr, char *buf);
169 #define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
170 &((struct dev_ext_attribute[]) { \
171 { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \
174 #define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
175 CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
176 #define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
177 CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)
179 /* CCI400 PMU Specific definitions */
181 #ifdef CONFIG_ARM_CCI400_PMU
184 #define CCI400_PORT_S0 0
185 #define CCI400_PORT_S1 1
186 #define CCI400_PORT_S2 2
187 #define CCI400_PORT_S3 3
188 #define CCI400_PORT_S4 4
189 #define CCI400_PORT_M0 5
190 #define CCI400_PORT_M1 6
191 #define CCI400_PORT_M2 7
193 #define CCI400_R1_PX 5
196 * Instead of an event id to monitor CCI cycles, a dedicated counter is
197 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
198 * make use of this event in hardware.
200 enum cci400_perf_events {
201 CCI400_PMU_CYCLES = 0xff
204 #define CCI400_PMU_CYCLE_CNTR_IDX 0
205 #define CCI400_PMU_CNTR0_IDX 1
208 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 select one of 8
209 * ports and bits 4:0 the event code. There are different event codes
210 * associated with each port type.
212 * Additionally, the range of events associated with the port types changed
213 * between Rev0 and Rev1.
215 * The constants below define the range of valid codes for each port type for
216 * the different revisions and are used to validate the event to be monitored.
219 #define CCI400_PMU_EVENT_MASK 0xffUL
220 #define CCI400_PMU_EVENT_SOURCE_SHIFT 5
221 #define CCI400_PMU_EVENT_SOURCE_MASK 0x7
222 #define CCI400_PMU_EVENT_CODE_SHIFT 0
223 #define CCI400_PMU_EVENT_CODE_MASK 0x1f
224 #define CCI400_PMU_EVENT_SOURCE(event) \
225 ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
226 CCI400_PMU_EVENT_SOURCE_MASK)
227 #define CCI400_PMU_EVENT_CODE(event) \
228 ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
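/*
 * Worked example of the encoding above (illustrative only): a perf config
 * value of 0x6a decodes as source = (0x6a >> 5) & 0x7 = 0x3 (slave
 * interface S3) and code = 0x6a & 0x1f = 0xa, i.e. the
 * si_r_data_last_hs_snoop event counted on port S3.
 */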
230 #define CCI400_R0_SLAVE_PORT_MIN_EV 0x00
231 #define CCI400_R0_SLAVE_PORT_MAX_EV 0x13
232 #define CCI400_R0_MASTER_PORT_MIN_EV 0x14
233 #define CCI400_R0_MASTER_PORT_MAX_EV 0x1a
235 #define CCI400_R1_SLAVE_PORT_MIN_EV 0x00
236 #define CCI400_R1_SLAVE_PORT_MAX_EV 0x14
237 #define CCI400_R1_MASTER_PORT_MIN_EV 0x00
238 #define CCI400_R1_MASTER_PORT_MAX_EV 0x11
240 #define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
241 CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
242 (unsigned long)_config)
244 static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
245 struct device_attribute *attr, char *buf);
247 static struct attribute *cci400_pmu_format_attrs[] = {
248 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
249 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
253 static struct attribute *cci400_r0_pmu_event_attrs[] = {
255 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
256 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
257 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
258 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
259 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
260 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
261 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
262 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
263 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
264 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
265 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
266 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
267 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
268 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
269 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
270 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
271 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
272 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
273 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
274 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
276 CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
277 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
278 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
279 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
280 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
281 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
282 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
283 /* Special event for cycles counter */
284 CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
288 static struct attribute *cci400_r1_pmu_event_attrs[] = {
290 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
291 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
292 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
293 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
294 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
295 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
296 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
297 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
298 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
299 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
300 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
301 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
302 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
303 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
304 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
305 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
306 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
307 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
308 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
309 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
310 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
312 CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
313 CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
314 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
315 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
316 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
317 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
318 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
319 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
320 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
321 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
322 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
323 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
324 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
325 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
326 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
327 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
328 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
329 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
330 /* Special event for cycles counter */
331 CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
335 static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
336 struct device_attribute *attr, char *buf)
338 struct dev_ext_attribute *eattr = container_of(attr,
339 struct dev_ext_attribute, attr);
340 return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
343 static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
344 struct cci_pmu_hw_events *hw,
345 unsigned long cci_event)
349 /* cycles event idx is fixed */
350 if (cci_event == CCI400_PMU_CYCLES) {
351 if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
354 return CCI400_PMU_CYCLE_CNTR_IDX;
357 for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
358 if (!test_and_set_bit(idx, hw->used_mask))
361 /* No counters available */
365 static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
367 u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
368 u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
371 if (hw_event & ~CCI400_PMU_EVENT_MASK)
374 if (hw_event == CCI400_PMU_CYCLES)
383 /* Slave Interface */
384 if_type = CCI_IF_SLAVE;
389 /* Master Interface */
390 if_type = CCI_IF_MASTER;
396 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
397 ev_code <= cci_pmu->model->event_ranges[if_type].max)
403 static int probe_cci400_revision(void)
406 rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
407 rev >>= CCI_PID2_REV_SHIFT;
409 if (rev < CCI400_R1_PX)
415 static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
417 if (platform_has_secure_cci_access())
418 return &cci_pmu_models[probe_cci400_revision()];
421 #else /* !CONFIG_ARM_CCI400_PMU */
422 static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
426 #endif /* CONFIG_ARM_CCI400_PMU */
428 #ifdef CONFIG_ARM_CCI500_PMU
431 * CCI500 provides 8 independent event counters that can count
432 * any of the events available.
434 * CCI500 PMU event id is a 9-bit value made of two parts.
435 * bits [8:5] - Source for the event
436 * 0x0-0x6 - Slave interfaces
437 * 0x8-0xD - Master interfaces
438 * 0xf - Global Events
441 * bits [4:0] - Event code (specific to type of interface)
445 #define CCI500_PORT_S0 0x0
446 #define CCI500_PORT_S1 0x1
447 #define CCI500_PORT_S2 0x2
448 #define CCI500_PORT_S3 0x3
449 #define CCI500_PORT_S4 0x4
450 #define CCI500_PORT_S5 0x5
451 #define CCI500_PORT_S6 0x6
453 #define CCI500_PORT_M0 0x8
454 #define CCI500_PORT_M1 0x9
455 #define CCI500_PORT_M2 0xa
456 #define CCI500_PORT_M3 0xb
457 #define CCI500_PORT_M4 0xc
458 #define CCI500_PORT_M5 0xd
460 #define CCI500_PORT_GLOBAL 0xf
462 #define CCI500_PMU_EVENT_MASK 0x1ffUL
463 #define CCI500_PMU_EVENT_SOURCE_SHIFT 0x5
464 #define CCI500_PMU_EVENT_SOURCE_MASK 0xf
465 #define CCI500_PMU_EVENT_CODE_SHIFT 0x0
466 #define CCI500_PMU_EVENT_CODE_MASK 0x1f
468 #define CCI500_PMU_EVENT_SOURCE(event) \
469 ((event >> CCI500_PMU_EVENT_SOURCE_SHIFT) & CCI500_PMU_EVENT_SOURCE_MASK)
470 #define CCI500_PMU_EVENT_CODE(event) \
471 ((event >> CCI500_PMU_EVENT_CODE_SHIFT) & CCI500_PMU_EVENT_CODE_MASK)
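/*
 * Worked example (illustrative only): a config value of 0x102 decodes as
 * source = (0x102 >> 5) & 0xf = 0x8 (master interface M0) and
 * code = 0x102 & 0x1f = 0x2, i.e. the mi_rrq_stall event on port M0.
 */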
473 #define CCI500_SLAVE_PORT_MIN_EV 0x00
474 #define CCI500_SLAVE_PORT_MAX_EV 0x1f
475 #define CCI500_MASTER_PORT_MIN_EV 0x00
476 #define CCI500_MASTER_PORT_MAX_EV 0x06
477 #define CCI500_GLOBAL_PORT_MIN_EV 0x00
478 #define CCI500_GLOBAL_PORT_MAX_EV 0x0f
481 #define CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
482 CCI_EXT_ATTR_ENTRY(_name, cci500_pmu_global_event_show, \
483 (unsigned long) _config)
485 static ssize_t cci500_pmu_global_event_show(struct device *dev,
486 struct device_attribute *attr, char *buf);
488 static struct attribute *cci500_pmu_format_attrs[] = {
489 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
490 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
494 static struct attribute *cci500_pmu_event_attrs[] = {
496 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
497 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
498 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
499 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
500 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
501 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
502 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
503 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
504 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
505 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
506 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
507 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
508 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
509 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
510 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
511 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
512 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
513 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
514 CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
515 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
516 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
517 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
518 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
519 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
520 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
521 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
522 CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
523 CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
524 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
525 CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
526 CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
527 CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),
530 CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
531 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
532 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
533 CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
534 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
535 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
536 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),
539 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
540 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
541 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
542 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
543 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
544 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
545 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
546 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
547 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
548 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
549 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
550 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
551 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
552 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
553 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
554 CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
558 static ssize_t cci500_pmu_global_event_show(struct device *dev,
559 struct device_attribute *attr, char *buf)
561 struct dev_ext_attribute *eattr = container_of(attr,
562 struct dev_ext_attribute, attr);
563 /* Global events have single fixed source code */
564 return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
565 (unsigned long)eattr->var, CCI500_PORT_GLOBAL);
568 static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
569 unsigned long hw_event)
571 u32 ev_source = CCI500_PMU_EVENT_SOURCE(hw_event);
572 u32 ev_code = CCI500_PMU_EVENT_CODE(hw_event);
575 if (hw_event & ~CCI500_PMU_EVENT_MASK)
586 if_type = CCI_IF_SLAVE;
594 if_type = CCI_IF_MASTER;
596 case CCI500_PORT_GLOBAL:
597 if_type = CCI_IF_GLOBAL;
603 if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
604 ev_code <= cci_pmu->model->event_ranges[if_type].max)
609 #endif /* CONFIG_ARM_CCI500_PMU */
612 * Program the CCI PMU counters which have PERF_HES_ARCH set
613 * with the event period and mark them ready before we enable the PMU.
616 void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
619 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
621 DECLARE_BITMAP(mask, cci_pmu->num_cntrs);
623 bitmap_zero(mask, cci_pmu->num_cntrs);
624 for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
625 struct perf_event *event = cci_hw->events[i];
630 /* Leave the events which are not counting */
631 if (event->hw.state & PERF_HES_STOPPED)
633 if (event->hw.state & PERF_HES_ARCH) {
635 event->hw.state &= ~PERF_HES_ARCH;
639 pmu_write_counters(cci_pmu, mask);
642 /* Should be called with cci_pmu->hw_events->pmu_lock held */
643 static void __cci_pmu_enable(struct cci_pmu *cci_pmu)
647 cci_pmu_sync_counters(cci_pmu);
649 /* Enable all the PMU counters. */
650 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
651 writel(val, cci_ctrl_base + CCI_PMCR);
654 /* Should be called with cci_pmu->hw_events->pmu_lock held */
655 static void __cci_pmu_disable(void)
659 /* Disable all the PMU counters. */
660 val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
661 writel(val, cci_ctrl_base + CCI_PMCR);
664 static ssize_t cci_pmu_format_show(struct device *dev,
665 struct device_attribute *attr, char *buf)
667 struct dev_ext_attribute *eattr = container_of(attr,
668 struct dev_ext_attribute, attr);
669 return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
672 static ssize_t cci_pmu_event_show(struct device *dev,
673 struct device_attribute *attr, char *buf)
675 struct dev_ext_attribute *eattr = container_of(attr,
676 struct dev_ext_attribute, attr);
677 /* source parameter is mandatory for normal PMU events */
678 return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
679 (unsigned long)eattr->var);
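/*
 * The format and event attributes above are what the perf tool parses from
 * sysfs; a user would typically select an event with something like the
 * following (hypothetical invocation, the PMU name depends on the probed
 * model):
 *
 *	perf stat -a -e CCI_400_r1/source=0x3,event=0xa/ -- sleep 1
 *
 * which the tool turns into the config value validated by pmu_map_event()
 * below.
 */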
682 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
684 return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
687 static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
689 return readl_relaxed(cci_pmu->base +
690 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
693 static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
694 int idx, unsigned int offset)
696 return writel_relaxed(value, cci_pmu->base +
697 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
700 static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
702 pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
705 static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
707 pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
710 static bool __maybe_unused
711 pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
713 return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
716 static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
718 pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
722 * For all counters on the CCI-PMU, disable any 'enabled' counters,
723 * saving the changed counters in the mask, so that we can restore
724 * it later using pmu_restore_counters. The mask is private to the
725 * caller. We cannot rely on the used_mask maintained by the CCI_PMU
726 * as it only tells us if the counter is assigned to perf_event or not.
727 * The state of the perf_event cannot be locked by the PMU layer, hence
728 * we check the individual counter status (which can be locked by
729 * cci_pmu->hw_events->pmu_lock).
731 * @mask should be initialised to empty by the caller.
733 static void __maybe_unused
734 pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
738 for (i = 0; i < cci_pmu->num_cntrs; i++) {
739 if (pmu_counter_is_enabled(cci_pmu, i)) {
741 pmu_disable_counter(cci_pmu, i);
747 * Restore the status of the counters. Reversal of the pmu_save_counters().
748 * For each counter set in the mask, re-enable the counter.
750 static void __maybe_unused
751 pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
755 for_each_set_bit(i, mask, cci_pmu->num_cntrs)
756 pmu_enable_counter(cci_pmu, i);
760 * Returns the number of programmable counters actually implemented
763 static u32 pmu_get_max_counters(void)
765 return (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
766 CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
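/*
 * Example (illustrative): a PMCR read of 0x00002001 has CEN (bit 0) set
 * and NCNT = (0x2001 & 0xf800) >> 11 = 4, i.e. four programmable counters
 * implemented.
 */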
769 static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
771 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
772 unsigned long cci_event = event->hw.config_base;
775 if (cci_pmu->model->get_event_idx)
776 return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
778 /* Generic code to find an unused idx from the mask */
779 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
780 if (!test_and_set_bit(idx, hw->used_mask))
783 /* No counters available */
787 static int pmu_map_event(struct perf_event *event)
789 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
791 if (event->attr.type < PERF_TYPE_MAX ||
792 !cci_pmu->model->validate_hw_event)
795 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
798 static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
801 struct platform_device *pmu_device = cci_pmu->plat_device;
803 if (unlikely(!pmu_device))
806 if (cci_pmu->nr_irqs < 1) {
807 dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
812 * Register all available CCI PMU interrupts. In the interrupt handler
813 * we iterate over the counters checking for interrupt source (the
814 * overflowing counter) and clear it.
816 * This should allow handling of non-unique interrupts for the counters.
818 for (i = 0; i < cci_pmu->nr_irqs; i++) {
819 int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
820 "arm-cci-pmu", cci_pmu);
822 dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
827 set_bit(i, &cci_pmu->active_irqs);
833 static void pmu_free_irq(struct cci_pmu *cci_pmu)
837 for (i = 0; i < cci_pmu->nr_irqs; i++) {
838 if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
841 free_irq(cci_pmu->irqs[i], cci_pmu);
845 static u32 pmu_read_counter(struct perf_event *event)
847 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
848 struct hw_perf_event *hw_counter = &event->hw;
849 int idx = hw_counter->idx;
852 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
853 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
856 value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
861 static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
863 pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
866 static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
869 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
871 for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
872 struct perf_event *event = cci_hw->events[i];
876 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
880 static u64 pmu_event_update(struct perf_event *event)
882 struct hw_perf_event *hwc = &event->hw;
883 u64 delta, prev_raw_count, new_raw_count;
886 prev_raw_count = local64_read(&hwc->prev_count);
887 new_raw_count = pmu_read_counter(event);
888 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
889 new_raw_count) != prev_raw_count);
891 delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
893 local64_add(delta, &event->count);
895 return new_raw_count;
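/*
 * The mask above makes the subtraction wrap correctly over the 32-bit
 * counter. Illustrative example: prev = 0xfffffff0, new = 0x00000010
 * gives delta = (0x10 - 0xfffffff0) & CCI_PMU_CNTR_MASK = 0x20, i.e.
 * 32 events across the wrap.
 */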
898 static void pmu_read(struct perf_event *event)
900 pmu_event_update(event);
903 void pmu_event_set_period(struct perf_event *event)
905 struct hw_perf_event *hwc = &event->hw;
907 * The CCI PMU counters have a period of 2^32. To account for the
908 * possibility of extreme interrupt latency we program for a period of
909 * half that. Hopefully we can handle the interrupt before another 2^31
910 * events occur and the counter overtakes its previous value.
912 u64 val = 1ULL << 31;
913 local64_set(&hwc->prev_count, val);
916 * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose
917 * values need to be synced with the s/w state before the PMU is
919 * Mark this counter for sync.
921 hwc->state |= PERF_HES_ARCH;
924 static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
927 struct cci_pmu *cci_pmu = dev;
928 struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
929 int idx, handled = IRQ_NONE;
931 raw_spin_lock_irqsave(&events->pmu_lock, flags);
933 /* Disable the PMU while we walk through the counters */
936 * Iterate over counters and update the corresponding perf events.
937 * This should work regardless of whether we have per-counter overflow
938 * interrupt or a combined overflow interrupt.
940 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
941 struct perf_event *event = events->events[idx];
942 struct hw_perf_event *hw_counter;
947 hw_counter = &event->hw;
949 /* Did this counter overflow? */
950 if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
951 CCI_PMU_OVRFLW_FLAG))
954 pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
957 pmu_event_update(event);
958 pmu_event_set_period(event);
959 handled = IRQ_HANDLED;
962 /* Enable the PMU and sync possibly overflowed counters */
963 __cci_pmu_enable(cci_pmu);
964 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
966 return IRQ_RETVAL(handled);
969 static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
971 int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
973 pmu_free_irq(cci_pmu);
979 static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
981 pmu_free_irq(cci_pmu);
984 static void hw_perf_event_destroy(struct perf_event *event)
986 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
987 atomic_t *active_events = &cci_pmu->active_events;
988 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
990 if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
991 cci_pmu_put_hw(cci_pmu);
992 mutex_unlock(reserve_mutex);
996 static void cci_pmu_enable(struct pmu *pmu)
998 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
999 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1000 int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
1001 unsigned long flags;
1006 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1007 __cci_pmu_enable(cci_pmu);
1008 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1012 static void cci_pmu_disable(struct pmu *pmu)
1014 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1015 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1016 unsigned long flags;
1018 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1019 __cci_pmu_disable();
1020 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1024 * Check if the idx represents a non-programmable counter.
1025 * All the fixed event counters are mapped before the programmable counters.
1028 static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
1030 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
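/*
 * For example, the CCI-400 models below have fixed_hw_cntrs = 1 (the
 * dedicated cycle counter at CCI400_PMU_CYCLE_CNTR_IDX == 0), so only
 * idx 0 is "fixed" and indices 1..num_cntrs-1 are programmable.
 */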
1033 static void cci_pmu_start(struct perf_event *event, int pmu_flags)
1035 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1036 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1037 struct hw_perf_event *hwc = &event->hw;
1039 unsigned long flags;
1042 * To handle interrupt latency, we always reprogram the period
1043 * regardless of PERF_EF_RELOAD.
1045 if (pmu_flags & PERF_EF_RELOAD)
1046 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
1050 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1051 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1055 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1057 /* Configure the counter unless you are counting a fixed event */
1058 if (!pmu_fixed_hw_idx(cci_pmu, idx))
1059 pmu_set_event(cci_pmu, idx, hwc->config_base);
1061 pmu_event_set_period(event);
1062 pmu_enable_counter(cci_pmu, idx);
1064 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1067 static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
1069 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1070 struct hw_perf_event *hwc = &event->hw;
1073 if (hwc->state & PERF_HES_STOPPED)
1076 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1077 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1082 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
1085 pmu_disable_counter(cci_pmu, idx);
1086 pmu_event_update(event);
1087 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
1090 static int cci_pmu_add(struct perf_event *event, int flags)
1092 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1093 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1094 struct hw_perf_event *hwc = &event->hw;
1098 perf_pmu_disable(event->pmu);
1100 /* If we don't have space for the counter then finish early. */
1101 idx = pmu_get_event_idx(hw_events, event);
1107 event->hw.idx = idx;
1108 hw_events->events[idx] = event;
1110 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
1111 if (flags & PERF_EF_START)
1112 cci_pmu_start(event, PERF_EF_RELOAD);
1114 /* Propagate our changes to the userspace mapping. */
1115 perf_event_update_userpage(event);
1118 perf_pmu_enable(event->pmu);
1122 static void cci_pmu_del(struct perf_event *event, int flags)
1124 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1125 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1126 struct hw_perf_event *hwc = &event->hw;
1129 cci_pmu_stop(event, PERF_EF_UPDATE);
1130 hw_events->events[idx] = NULL;
1131 clear_bit(idx, hw_events->used_mask);
1133 perf_event_update_userpage(event);
1137 validate_event(struct pmu *cci_pmu,
1138 struct cci_pmu_hw_events *hw_events,
1139 struct perf_event *event)
1141 if (is_software_event(event))
1145 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
1146 * core perf code won't check that the pmu->ctx == leader->ctx
1147 * until after pmu->event_init(event).
1149 if (event->pmu != cci_pmu)
1152 if (event->state < PERF_EVENT_STATE_OFF)
1155 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
1158 return pmu_get_event_idx(hw_events, event) >= 0;
1162 validate_group(struct perf_event *event)
1164 struct perf_event *sibling, *leader = event->group_leader;
1165 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1166 unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
1167 struct cci_pmu_hw_events fake_pmu = {
1169 * Initialise the fake PMU. We only need to populate the
1170 * used_mask for the purposes of validation.
1174 memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
1176 if (!validate_event(event->pmu, &fake_pmu, leader))
1179 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
1180 if (!validate_event(event->pmu, &fake_pmu, sibling))
1184 if (!validate_event(event->pmu, &fake_pmu, event))
1191 __hw_perf_event_init(struct perf_event *event)
1193 struct hw_perf_event *hwc = &event->hw;
1196 mapping = pmu_map_event(event);
1199 pr_debug("event %x:%llx not supported\n", event->attr.type,
1200 event->attr.config);
1205 * We don't assign an index until we actually place the event onto
1206 * hardware. Use -1 to signify that we haven't decided where to put it
1210 hwc->config_base = 0;
1212 hwc->event_base = 0;
1215 * Store the event encoding into the config_base field.
1217 hwc->config_base |= (unsigned long)mapping;
1220 * Limit the sample_period to half of the counter width. That way, the
1221 * new counter value is far less likely to overtake the previous one
1222 * unless you have some serious IRQ latency issues.
1224 hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
1225 hwc->last_period = hwc->sample_period;
1226 local64_set(&hwc->period_left, hwc->sample_period);
1228 if (event->group_leader != event) {
1229 if (validate_group(event) != 0)
1236 static int cci_pmu_event_init(struct perf_event *event)
1238 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1239 atomic_t *active_events = &cci_pmu->active_events;
1243 if (event->attr.type != event->pmu->type)
1246 /* Shared by all CPUs, no meaningful state to sample */
1247 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
1250 /* We have no filtering of any kind */
1251 if (event->attr.exclude_user ||
1252 event->attr.exclude_kernel ||
1253 event->attr.exclude_hv ||
1254 event->attr.exclude_idle ||
1255 event->attr.exclude_host ||
1256 event->attr.exclude_guest)
1260 * Following the example set by other "uncore" PMUs, we accept any CPU
1261 * and rewrite its affinity dynamically rather than having perf core
1262 * handle cpu == -1 and pid == -1 for this case.
1264 * The perf core will pin online CPUs for the duration of this call and
1265 * the event being installed into its context, so the PMU's CPU can't
1266 * change under our feet.
1268 cpu = cpumask_first(&cci_pmu->cpus);
1269 if (event->cpu < 0 || cpu < 0)
1273 event->destroy = hw_perf_event_destroy;
1274 if (!atomic_inc_not_zero(active_events)) {
1275 mutex_lock(&cci_pmu->reserve_mutex);
1276 if (atomic_read(active_events) == 0)
1277 err = cci_pmu_get_hw(cci_pmu);
1279 atomic_inc(active_events);
1280 mutex_unlock(&cci_pmu->reserve_mutex);
1285 err = __hw_perf_event_init(event);
1287 hw_perf_event_destroy(event);
1292 static ssize_t pmu_cpumask_attr_show(struct device *dev,
1293 struct device_attribute *attr, char *buf)
1295 struct pmu *pmu = dev_get_drvdata(dev);
1296 struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1298 int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
1299 cpumask_pr_args(&cci_pmu->cpus));
1305 static struct device_attribute pmu_cpumask_attr =
1306 __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);
1308 static struct attribute *pmu_attrs[] = {
1309 &pmu_cpumask_attr.attr,
1313 static struct attribute_group pmu_attr_group = {
1317 static struct attribute_group pmu_format_attr_group = {
1319 .attrs = NULL, /* Filled in cci_pmu_init_attrs */
1322 static struct attribute_group pmu_event_attr_group = {
1324 .attrs = NULL, /* Filled in cci_pmu_init_attrs */
1327 static const struct attribute_group *pmu_attr_groups[] = {
1329 &pmu_format_attr_group,
1330 &pmu_event_attr_group,
1334 static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
1336 const struct cci_pmu_model *model = cci_pmu->model;
1337 char *name = model->name;
1340 pmu_event_attr_group.attrs = model->event_attrs;
1341 pmu_format_attr_group.attrs = model->format_attrs;
1343 cci_pmu->pmu = (struct pmu) {
1344 .name = cci_pmu->model->name,
1345 .task_ctx_nr = perf_invalid_context,
1346 .pmu_enable = cci_pmu_enable,
1347 .pmu_disable = cci_pmu_disable,
1348 .event_init = cci_pmu_event_init,
1351 .start = cci_pmu_start,
1352 .stop = cci_pmu_stop,
1354 .attr_groups = pmu_attr_groups,
1357 cci_pmu->plat_device = pdev;
1358 num_cntrs = pmu_get_max_counters();
1359 if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
1360 dev_warn(&pdev->dev,
1361 "PMU implements more counters(%d) than supported by"
1362 " the model(%d), truncated.",
1363 num_cntrs, cci_pmu->model->num_hw_cntrs);
1364 num_cntrs = cci_pmu->model->num_hw_cntrs;
1366 cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
1368 return perf_pmu_register(&cci_pmu->pmu, name, -1);
1371 static int cci_pmu_cpu_notifier(struct notifier_block *self,
1372 unsigned long action, void *hcpu)
1374 struct cci_pmu *cci_pmu = container_of(self,
1375 struct cci_pmu, cpu_nb);
1376 unsigned int cpu = (long)hcpu;
1377 unsigned int target;
1379 switch (action & ~CPU_TASKS_FROZEN) {
1380 case CPU_DOWN_PREPARE:
1381 if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
1383 target = cpumask_any_but(cpu_online_mask, cpu);
1384 if (target >= nr_cpu_ids) // UP, last CPU
1387 * TODO: migrate context once core races on event->ctx have been fixed.
1390 cpumask_set_cpu(target, &cci_pmu->cpus);
1398 static struct cci_pmu_model cci_pmu_models[] = {
1399 #ifdef CONFIG_ARM_CCI400_PMU
1402 .fixed_hw_cntrs = 1, /* Cycle counter */
1405 .format_attrs = cci400_pmu_format_attrs,
1406 .event_attrs = cci400_r0_pmu_event_attrs,
1409 CCI400_R0_SLAVE_PORT_MIN_EV,
1410 CCI400_R0_SLAVE_PORT_MAX_EV,
1413 CCI400_R0_MASTER_PORT_MIN_EV,
1414 CCI400_R0_MASTER_PORT_MAX_EV,
1417 .validate_hw_event = cci400_validate_hw_event,
1418 .get_event_idx = cci400_get_event_idx,
1421 .name = "CCI_400_r1",
1422 .fixed_hw_cntrs = 1, /* Cycle counter */
1425 .format_attrs = cci400_pmu_format_attrs,
1426 .event_attrs = cci400_r1_pmu_event_attrs,
1429 CCI400_R1_SLAVE_PORT_MIN_EV,
1430 CCI400_R1_SLAVE_PORT_MAX_EV,
1433 CCI400_R1_MASTER_PORT_MIN_EV,
1434 CCI400_R1_MASTER_PORT_MAX_EV,
1437 .validate_hw_event = cci400_validate_hw_event,
1438 .get_event_idx = cci400_get_event_idx,
1441 #ifdef CONFIG_ARM_CCI500_PMU
1444 .fixed_hw_cntrs = 0,
1446 .cntr_size = SZ_64K,
1447 .format_attrs = cci500_pmu_format_attrs,
1448 .event_attrs = cci500_pmu_event_attrs,
1451 CCI500_SLAVE_PORT_MIN_EV,
1452 CCI500_SLAVE_PORT_MAX_EV,
1455 CCI500_MASTER_PORT_MIN_EV,
1456 CCI500_MASTER_PORT_MAX_EV,
1459 CCI500_GLOBAL_PORT_MIN_EV,
1460 CCI500_GLOBAL_PORT_MAX_EV,
1463 .validate_hw_event = cci500_validate_hw_event,
1468 static const struct of_device_id arm_cci_pmu_matches[] = {
1469 #ifdef CONFIG_ARM_CCI400_PMU
1471 .compatible = "arm,cci-400-pmu",
1475 .compatible = "arm,cci-400-pmu,r0",
1476 .data = &cci_pmu_models[CCI400_R0],
1479 .compatible = "arm,cci-400-pmu,r1",
1480 .data = &cci_pmu_models[CCI400_R1],
1483 #ifdef CONFIG_ARM_CCI500_PMU
1485 .compatible = "arm,cci-500-pmu,r0",
1486 .data = &cci_pmu_models[CCI500_R0],
1492 static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
1494 const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
1501 dev_warn(&pdev->dev, "DEPRECATED compatible property,"
1502 "requires secure access to CCI registers");
1503 return probe_cci_model(pdev);
1506 static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
1510 for (i = 0; i < nr_irqs; i++)
1517 static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev)
1519 struct cci_pmu *cci_pmu;
1520 const struct cci_pmu_model *model;
1523 * All allocations are devm_* hence we don't have to free
1524 * them explicitly on an error, as it would end up in driver
1527 model = get_cci_model(pdev);
1529 dev_warn(&pdev->dev, "CCI PMU version not supported\n");
1530 return ERR_PTR(-ENODEV);
1533 cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL);
1535 return ERR_PTR(-ENOMEM);
1537 cci_pmu->model = model;
1538 cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model),
1539 sizeof(*cci_pmu->irqs), GFP_KERNEL);
1541 return ERR_PTR(-ENOMEM);
1542 cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev,
1543 CCI_PMU_MAX_HW_CNTRS(model),
1544 sizeof(*cci_pmu->hw_events.events),
1546 if (!cci_pmu->hw_events.events)
1547 return ERR_PTR(-ENOMEM);
1548 cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev,
1549 BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
1550 sizeof(*cci_pmu->hw_events.used_mask),
1552 if (!cci_pmu->hw_events.used_mask)
1553 return ERR_PTR(-ENOMEM);
1559 static int cci_pmu_probe(struct platform_device *pdev)
1561 struct resource *res;
1562 struct cci_pmu *cci_pmu;
1565 cci_pmu = cci_pmu_alloc(pdev);
1566 if (IS_ERR(cci_pmu))
1567 return PTR_ERR(cci_pmu);
1569 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1570 cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
1571 if (IS_ERR(cci_pmu->base))
1575 * CCI PMU has one overflow interrupt per counter; but some may be tied
1576 * together to a common interrupt.
1578 cci_pmu->nr_irqs = 0;
1579 for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
1580 irq = platform_get_irq(pdev, i);
1584 if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
1587 cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
1591 * Ensure that the device tree has as many interrupts as the number
1594 if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
1595 dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n",
1596 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
1600 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
1601 mutex_init(&cci_pmu->reserve_mutex);
1602 atomic_set(&cci_pmu->active_events, 0);
1603 cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
1605 cci_pmu->cpu_nb = (struct notifier_block) {
1606 .notifier_call = cci_pmu_cpu_notifier,
1608 * to migrate uncore events, our notifier should be executed
1609 * before perf core's notifier.
1611 .priority = CPU_PRI_PERF + 1,
1614 ret = register_cpu_notifier(&cci_pmu->cpu_nb);
1618 ret = cci_pmu_init(cci_pmu, pdev);
1620 unregister_cpu_notifier(&cci_pmu->cpu_nb);
1624 pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
1628 static int cci_platform_probe(struct platform_device *pdev)
1633 return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
1636 static struct platform_driver cci_pmu_driver = {
1638 .name = DRIVER_NAME_PMU,
1639 .of_match_table = arm_cci_pmu_matches,
1641 .probe = cci_pmu_probe,
1644 static struct platform_driver cci_platform_driver = {
1646 .name = DRIVER_NAME,
1647 .of_match_table = arm_cci_matches,
1649 .probe = cci_platform_probe,
1652 static int __init cci_platform_init(void)
1656 ret = platform_driver_register(&cci_pmu_driver);
1660 return platform_driver_register(&cci_platform_driver);
1663 #else /* !CONFIG_ARM_CCI_PMU */
1665 static int __init cci_platform_init(void)
1670 #endif /* CONFIG_ARM_CCI_PMU */
1672 #ifdef CONFIG_ARM_CCI400_PORT_CTRL
1674 #define CCI_PORT_CTRL 0x0
1675 #define CCI_CTRL_STATUS 0xc
1677 #define CCI_ENABLE_SNOOP_REQ 0x1
1678 #define CCI_ENABLE_DVM_REQ 0x2
1679 #define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
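/*
 * In other words, enabling a port writes 0x3 (snoop + DVM requests
 * enabled) to its CCI_PORT_CTRL register and disabling it writes 0x0;
 * cci_port_control() below then polls bit 0 of CCI_CTRL_STATUS until the
 * change has taken effect.
 */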
1681 enum cci_ace_port_type {
1682 ACE_INVALID_PORT = 0x0,
1687 struct cci_ace_port {
1690 enum cci_ace_port_type type;
1691 struct device_node *dn;
1694 static struct cci_ace_port *ports;
1695 static unsigned int nb_cci_ports;
1703 * Use the port MSB as valid flag, shift can be made dynamic
1704 * by computing number of bits required for port indexes.
1705 * Code disabling CCI cpu ports runs with D-cache invalidated
1706 * and SCTLR bit clear so data accesses must be kept to a minimum
1707 * to improve performance; for now shift is left static to
1708 * avoid one more data access while disabling the CCI port.
1710 #define PORT_VALID_SHIFT 31
1711 #define PORT_VALID (0x1 << PORT_VALID_SHIFT)
1713 static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
1715 port->port = PORT_VALID | index;
1716 port->mpidr = mpidr;
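/*
 * Example of the encoding (illustrative): init_cpu_port(&cpu_port[cpu], 2, mpidr)
 * stores PORT_VALID | 2 = 0x80000002 in port->port, so both the index and
 * the valid flag can later be recovered with a single load.
 */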
1719 static inline bool cpu_port_is_valid(struct cpu_port *port)
1721 return !!(port->port & PORT_VALID);
1724 static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
1726 return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
1729 static struct cpu_port cpu_port[NR_CPUS];
1732 * __cci_ace_get_port - Function to retrieve the port index connected to
1735 * @dn: device node of the device to look-up
1739 * - CCI port index if success
1740 * - -ENODEV if failure
1742 static int __cci_ace_get_port(struct device_node *dn, int type)
1746 struct device_node *cci_portn;
1748 cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
1749 for (i = 0; i < nb_cci_ports; i++) {
1750 ace_match = ports[i].type == type;
1751 if (ace_match && cci_portn == ports[i].dn)
1757 int cci_ace_get_port(struct device_node *dn)
1759 return __cci_ace_get_port(dn, ACE_LITE_PORT);
1761 EXPORT_SYMBOL_GPL(cci_ace_get_port);
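/*
 * Sketch of a hypothetical consumer, e.g. BSP power management code
 * controlling an ACE-lite device port (the calling context here is made
 * up for illustration):
 *
 *	int port = cci_ace_get_port(dev->of_node);
 *	if (port >= 0)
 *		__cci_control_port_by_index(port, false);  // disable snoops/DVM
 */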
1763 static void cci_ace_init_ports(void)
1766 struct device_node *cpun;
1769 * Port index look-up speeds up the function disabling ports by CPU,
1770 * since the logical to port index mapping is done once and does
1771 * not change after system boot.
1772 * The stashed index array is initialized for all possible CPUs
1775 for_each_possible_cpu(cpu) {
1776 /* too early to use cpu->of_node */
1777 cpun = of_get_cpu_node(cpu, NULL);
1779 if (WARN(!cpun, "Missing cpu device node\n"))
1782 port = __cci_ace_get_port(cpun, ACE_PORT);
1786 init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
1789 for_each_possible_cpu(cpu) {
1790 WARN(!cpu_port_is_valid(&cpu_port[cpu]),
1791 "CPU %u does not have an associated CCI port\n",
1796 * Functions to enable/disable a CCI interconnect slave port
1798 * They are called by low-level power management code to disable slave
1799 * interface snoops and DVM broadcast.
1800 * Since they may execute with cache data allocation disabled and
1801 * after the caches have been cleaned and invalidated, the functions provide
1802 * no explicit locking: they may run with the D-cache disabled, so normal
1803 * cacheable kernel locks based on ldrex/strex may not work.
1804 * Locking has to be provided by BSP implementations to ensure proper operation.
1809 * cci_port_control() - function to control a CCI port
1811 * @port: index of the port to setup
1812 * @enable: if true enables the port, if false disables it
1814 static void notrace cci_port_control(unsigned int port, bool enable)
1816 void __iomem *base = ports[port].base;
1818 writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
1820 * This function is called from power down procedures
1821 * and must not execute any instruction that might
1822 * cause the processor to be put in a quiescent state
1823 * (eg wfi). Hence, cpu_relax() can not be added to this
1824 * read loop to optimize power, since it might hide possibly
1825 * disruptive operations.
1827 while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
1832 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
1835 * @mpidr: mpidr of the CPU whose CCI port should be disabled
1837 * Disabling a CCI port for a CPU implies disabling the CCI port
1838 * controlling that CPU cluster. Code disabling CPU CCI ports
1839 * must make sure that the CPU running the code is the last active CPU
1840 * in the cluster, i.e. all other CPUs are quiescent in a low power state.
1844 * -ENODEV on port look-up failure
1846 int notrace cci_disable_port_by_cpu(u64 mpidr)
1850 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1851 is_valid = cpu_port_is_valid(&cpu_port[cpu]);
1852 if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
1853 cci_port_control(cpu_port[cpu].port, false);
1859 EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
1862 * cci_enable_port_for_self() - enable a CCI port for calling CPU
1864 * Enabling a CCI port for the calling CPU implies enabling the CCI
1865 * port controlling that CPU's cluster. Caller must make sure that the
1866 * CPU running the code is the first active CPU in the cluster and all
1867 * other CPUs are quiescent in a low power state or waiting for this CPU
1868 * to complete the CCI initialization.
1870 * Because this is called when the MMU is still off and with no stack,
1871 * the code must be position independent and ideally rely on callee
1872 * clobbered registers only. To achieve this we must code this function
1873 * entirely in assembler.
1875 * On success this returns with the proper CCI port enabled. In case of
1876 * any failure this never returns as the inability to enable the CCI is
1877 * fatal and there is no possible recovery at this stage.
1879 asmlinkage void __naked cci_enable_port_for_self(void)
1883 " mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n"
1884 " and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
1887 " add r1, r1, r2 @ &cpu_port \n"
1888 " add ip, r1, %[sizeof_cpu_port] \n"
1890 /* Loop over the cpu_port array looking for a matching MPIDR */
1891 "1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
1892 " cmp r2, r0 @ compare MPIDR \n"
1895 /* Found a match, now test port validity */
1896 " ldr r3, [r1, %[offsetof_cpu_port_port]] \n"
1897 " tst r3, #"__stringify(PORT_VALID)" \n"
1900 /* no match, loop with the next cpu_port entry */
1901 "2: add r1, r1, %[sizeof_struct_cpu_port] \n"
1902 " cmp r1, ip @ done? \n"
1905 /* CCI port not found -- cheaply try to stall this CPU */
1906 "cci_port_not_found: \n"
1909 " b cci_port_not_found \n"
1911 /* Use matched port index to look up the corresponding ports entry */
1912 "3: bic r3, r3, #"__stringify(PORT_VALID)" \n"
1914 " ldmia r0, {r1, r2} \n"
1915 " sub r1, r1, r0 @ virt - phys \n"
1916 " ldr r0, [r0, r2] @ *(&ports) \n"
1917 " mov r2, %[sizeof_struct_ace_port] \n"
1918 " mla r0, r2, r3, r0 @ &ports[index] \n"
1919 " sub r0, r0, r1 @ virt_to_phys() \n"
1921 /* Enable the CCI port */
1922 " ldr r0, [r0, %[offsetof_port_phys]] \n"
1923 " mov r3, %[cci_enable_req]\n"
1924 " str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
1926 /* poll the status reg for completion */
1929 " ldr r0, [r0, r1] @ cci_ctrl_base \n"
1930 "4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
1931 " tst r1, %[cci_control_status_bits] \n"
1938 "5: .word cpu_port - . \n"
1940 " .word ports - 6b \n"
1941 "7: .word cci_ctrl_phys - . \n"
1943 [sizeof_cpu_port] "i" (sizeof(cpu_port)),
1944 [cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
1945 [cci_control_status_bits] "i" cpu_to_le32(1),
1947 [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
1949 [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
1951 [offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
1952 [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
1953 [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
1954 [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
1960 * __cci_control_port_by_device() - function to control a CCI port by device
1963 * @dn: device node pointer of the device whose CCI port should be
1965 * @enable: if true enables the port, if false disables it
1969 * -ENODEV on port look-up failure
1971 int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
1978 port = __cci_ace_get_port(dn, ACE_LITE_PORT);
1979 if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
1982 cci_port_control(port, enable);
1985 EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
1988 * __cci_control_port_by_index() - function to control a CCI port by port index
1990 * @port: port index previously retrieved with cci_ace_get_port()
1991 * @enable: if true enables the port, if false disables it
1995 * -ENODEV on port index out of range
1996 * -EPERM if operation carried out on an ACE PORT
1998 int notrace __cci_control_port_by_index(u32 port, bool enable)
2000 if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
2003 * CCI control for ports connected to CPUs is extremely fragile
2004 * and must be made to go through a specific and controlled
2005 * interface (i.e. cci_disable_port_by_cpu()); control by general-purpose
2006 * indexing is therefore disabled for ACE ports.
2008 if (ports[port].type == ACE_PORT)
2011 cci_port_control(port, enable);
2014 EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
2016 static const struct of_device_id arm_cci_ctrl_if_matches[] = {
2017 {.compatible = "arm,cci-400-ctrl-if", },
2021 static int cci_probe_ports(struct device_node *np)
2023 struct cci_nb_ports const *cci_config;
2024 int ret, i, nb_ace = 0, nb_ace_lite = 0;
2025 struct device_node *cp;
2026 struct resource res;
2027 const char *match_str;
2031 cci_config = of_match_node(arm_cci_matches, np)->data;
2035 nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
2037 ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
2041 for_each_child_of_node(np, cp) {
2042 if (!of_match_node(arm_cci_ctrl_if_matches, cp))
2045 i = nb_ace + nb_ace_lite;
2047 if (i >= nb_cci_ports)
2050 if (of_property_read_string(cp, "interface-type",
2052 WARN(1, "node %s missing interface-type property\n",
2056 is_ace = strcmp(match_str, "ace") == 0;
2057 if (!is_ace && strcmp(match_str, "ace-lite")) {
2058 WARN(1, "node %s containing invalid interface-type property, skipping it\n",
2063 ret = of_address_to_resource(cp, 0, &res);
2065 ports[i].base = ioremap(res.start, resource_size(&res));
2066 ports[i].phys = res.start;
2068 if (ret || !ports[i].base) {
2069 WARN(1, "unable to ioremap CCI port %d\n", i);
2074 if (WARN_ON(nb_ace >= cci_config->nb_ace))
2076 ports[i].type = ACE_PORT;
2079 if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
2081 ports[i].type = ACE_LITE_PORT;
2087 /* initialize a stashed array of ACE ports to speed-up look-up */
2088 cci_ace_init_ports();
2091 * Multi-cluster systems may need this data when non-coherent, during
2092 * cluster power-up/power-down. Make sure it reaches main memory.
2094 sync_cache_w(&cci_ctrl_base);
2095 sync_cache_w(&cci_ctrl_phys);
2096 sync_cache_w(&ports);
2097 sync_cache_w(&cpu_port);
2098 __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
2099 pr_info("ARM CCI driver probed\n");
2103 #else /* !CONFIG_ARM_CCI400_PORT_CTRL */
2104 static inline int cci_probe_ports(struct device_node *np)
2108 #endif /* CONFIG_ARM_CCI400_PORT_CTRL */
2110 static int cci_probe(void)
2113 struct device_node *np;
2114 struct resource res;
2116 np = of_find_matching_node(NULL, arm_cci_matches);
2117 if (!np || !of_device_is_available(np))
2120 ret = of_address_to_resource(np, 0, &res);
2122 cci_ctrl_base = ioremap(res.start, resource_size(&res));
2123 cci_ctrl_phys = res.start;
2125 if (ret || !cci_ctrl_base) {
2126 WARN(1, "unable to ioremap CCI ctrl\n");
2130 return cci_probe_ports(np);
2133 static int cci_init_status = -EAGAIN;
2134 static DEFINE_MUTEX(cci_probing);
2136 static int cci_init(void)
2138 if (cci_init_status != -EAGAIN)
2139 return cci_init_status;
2141 mutex_lock(&cci_probing);
2142 if (cci_init_status == -EAGAIN)
2143 cci_init_status = cci_probe();
2144 mutex_unlock(&cci_probing);
2145 return cci_init_status;
2149 * To sort out early init call ordering, a helper function is provided to
2150 * check whether the CCI driver has been initialized. If it has not, the
2151 * helper calls the init function that probes the driver and updates the
2152 * return value.
2154 bool cci_probed(void)
2156 return cci_init() == 0;
2158 EXPORT_SYMBOL_GPL(cci_probed);
2160 early_initcall(cci_init);
2161 core_initcall(cci_platform_init);
2162 MODULE_LICENSE("GPL");
2163 MODULE_DESCRIPTION("ARM CCI support");