drivers/hwtracing/coresight/coresight-etm3x.c
1 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/io.h>
19 #include <linux/err.h>
20 #include <linux/fs.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/smp.h>
24 #include <linux/sysfs.h>
25 #include <linux/stat.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/cpu.h>
28 #include <linux/of.h>
29 #include <linux/coresight.h>
30 #include <linux/amba/bus.h>
31 #include <linux/seq_file.h>
32 #include <linux/uaccess.h>
33 #include <linux/clk.h>
34 #include <asm/sections.h>
35
36 #include "coresight-etm.h"
37
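/*
 * When non-zero (typically coresight-etm3x.boot_enable=1 on the kernel
 * command line; the exact prefix depends on the module name), the ETM/PTM
 * is switched on as soon as it has been probed rather than waiting for a
 * user request through sysfs.
 */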
38 static int boot_enable;
39 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
40
41 /* The number of ETM/PTM currently registered */
42 static int etm_count;
43 static struct etm_drvdata *etmdrvdata[NR_CPUS];
44
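/*
 * Register accessors: depending on drvdata->use_cp14 the ETM registers are
 * reached either through the CP14 coprocessor interface or through the
 * memory mapped window at drvdata->base.
 */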
45 static inline void etm_writel(struct etm_drvdata *drvdata,
46                               u32 val, u32 off)
47 {
48         if (drvdata->use_cp14) {
49                 if (etm_writel_cp14(off, val)) {
50                         dev_err(drvdata->dev,
51                                 "invalid CP14 access to ETM reg: %#x", off);
52                 }
53         } else {
54                 writel_relaxed(val, drvdata->base + off);
55         }
56 }
57
58 static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
59 {
60         u32 val;
61
62         if (drvdata->use_cp14) {
63                 if (etm_readl_cp14(off, &val)) {
64                         dev_err(drvdata->dev,
65                                 "invalid CP14 access to ETM reg: %#x", off);
66                 }
67         } else {
68                 val = readl_relaxed(drvdata->base + off);
69         }
70
71         return val;
72 }
73
74 /*
75  * Memory mapped writes to clear the OS lock are not supported on some
76  * processors; on those the OS lock must be cleared before any memory mapped
77  * access is made, otherwise memory mapped reads/writes will be invalid.
78  */
79 static void etm_os_unlock(void *info)
80 {
81         struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
82         /* Writing any value to ETMOSLAR unlocks the trace registers */
83         etm_writel(drvdata, 0x0, ETMOSLAR);
84         isb();
85 }
86
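/*
 * Power management helpers: ETMCR_PWD_DWN (etm_set/clr_pwrdwn) and
 * ETMPDCR_PWD_UP (etm_set/clr_pwrup) gate access to the trace registers.
 * The mb()/isb() pairs order the power state changes against the
 * surrounding cp14/memory mapped accesses.
 */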
87 static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
88 {
89         u32 etmcr;
90
91         /* Ensure pending cp14 accesses complete before setting pwrdwn */
92         mb();
93         isb();
94         etmcr = etm_readl(drvdata, ETMCR);
95         etmcr |= ETMCR_PWD_DWN;
96         etm_writel(drvdata, etmcr, ETMCR);
97 }
98
99 static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
100 {
101         u32 etmcr;
102
103         etmcr = etm_readl(drvdata, ETMCR);
104         etmcr &= ~ETMCR_PWD_DWN;
105         etm_writel(drvdata, etmcr, ETMCR);
106         /* Ensure pwrdwn is cleared before subsequent cp14 accesses */
107         mb();
108         isb();
109 }
110
111 static void etm_set_pwrup(struct etm_drvdata *drvdata)
112 {
113         u32 etmpdcr;
114
115         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
116         etmpdcr |= ETMPDCR_PWD_UP;
117         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
118         /* Ensure pwrup completes before subsequent cp14 accesses */
119         mb();
120         isb();
121 }
122
123 static void etm_clr_pwrup(struct etm_drvdata *drvdata)
124 {
125         u32 etmpdcr;
126
127         /* Ensure pending cp14 accesses complete before clearing pwrup */
128         mb();
129         isb();
130         etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
131         etmpdcr &= ~ETMPDCR_PWD_UP;
132         writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
133 }
134
135 /**
136  * coresight_timeout_etm - loop until a bit has changed to a specific state.
137  * @drvdata: etm's private data structure.
138  * @offset: offset of the register to poll from the trace unit base address.
139  * @position: the position of the bit of interest.
140  * @value: the value the bit should have.
141  *
142  * Basically the same as coresight_timeout() except for the register access
143  * method, where CP14 configurations have to be accounted for.
144  *
145  * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
146  * TIMEOUT_US has elapsed, whichever happens first.
147  */
149 static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
150                                   int position, int value)
151 {
152         int i;
153         u32 val;
154
155         for (i = TIMEOUT_US; i > 0; i--) {
156                 val = etm_readl(drvdata, offset);
157                 /* Waiting on the bit to go from 0 to 1 */
158                 if (value) {
159                         if (val & BIT(position))
160                                 return 0;
161                 /* Waiting on the bit to go from 1 to 0 */
162                 } else {
163                         if (!(val & BIT(position)))
164                                 return 0;
165                 }
166
167                 /*
168                  * Delay is arbitrary - the specification doesn't say how long
169                  * we are expected to wait.  Extra check required to make sure
170                  * we don't wait needlessly on the last iteration.
171                  */
172                 if (i - 1)
173                         udelay(1);
174         }
175
176         return -EAGAIN;
177 }
178
179
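/*
 * etm_set_prog()/etm_clr_prog() enter and leave programming mode: they
 * toggle ETMCR_ETM_PRG and then poll the programming bit in ETMSR until
 * the ETM confirms the new state (or the poll times out).
 */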
180 static void etm_set_prog(struct etm_drvdata *drvdata)
181 {
182         u32 etmcr;
183
184         etmcr = etm_readl(drvdata, ETMCR);
185         etmcr |= ETMCR_ETM_PRG;
186         etm_writel(drvdata, etmcr, ETMCR);
187         /*
188          * Recommended by spec for cp14 accesses to ensure etmcr write is
189          * complete before polling etmsr
190          */
191         isb();
192         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
193                 dev_err(drvdata->dev,
194                         "timeout observed when probing at offset %#x\n", ETMSR);
195         }
196 }
197
198 static void etm_clr_prog(struct etm_drvdata *drvdata)
199 {
200         u32 etmcr;
201
202         etmcr = etm_readl(drvdata, ETMCR);
203         etmcr &= ~ETMCR_ETM_PRG;
204         etm_writel(drvdata, etmcr, ETMCR);
205         /*
206          * Recommended by spec for cp14 accesses to ensure etmcr write is
207          * complete before polling etmsr
208          */
209         isb();
210         if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
211                 dev_err(drvdata->dev,
212                         "timeout observed when probing at offset %#x\n", ETMSR);
213         }
214 }
215
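/*
 * Reset the programmable resources tracked in etm_drvdata (events,
 * counters, sequencer state and context-ID comparators) to benign default
 * values; used by the sysfs 'reset' handler below.
 */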
216 static void etm_set_default(struct etm_drvdata *drvdata)
217 {
218         int i;
219
220         drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
221         drvdata->enable_event = ETM_HARD_WIRE_RES_A;
222
223         drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
224         drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
225         drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
226         drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
227         drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
228         drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
229         drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
230
231         for (i = 0; i < drvdata->nr_cntr; i++) {
232                 drvdata->cntr_rld_val[i] = 0x0;
233                 drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
234                 drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
235                 drvdata->cntr_val[i] = 0x0;
236         }
237
238         drvdata->seq_curr_state = 0x0;
239         drvdata->ctxid_idx = 0x0;
240         for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
241                 drvdata->ctxid_val[i] = 0x0;
242         drvdata->ctxid_mask = 0x0;
243 }
244
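/*
 * Program the ETM with the configuration held in etm_drvdata and start
 * tracing.  Runs on the CPU owning the ETM (via smp_call_function_single()
 * or the CPU_STARTING notifier): unlock the coresight registers, power the
 * trace unit up, enter programming mode, write the configuration, set
 * ETMCR_ETM_EN and leave programming mode again.
 */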
245 static void etm_enable_hw(void *info)
246 {
247         int i;
248         u32 etmcr;
249         struct etm_drvdata *drvdata = info;
250
251         CS_UNLOCK(drvdata->base);
252
253         /* Turn engine on */
254         etm_clr_pwrdwn(drvdata);
255         /* Apply power to trace registers */
256         etm_set_pwrup(drvdata);
257         /* Make sure all registers are accessible */
258         etm_os_unlock(drvdata);
259
260         etm_set_prog(drvdata);
261
262         etmcr = etm_readl(drvdata, ETMCR);
263         etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
264         etmcr |= drvdata->port_size;
265         etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
266         etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
267         etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
268         etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
269         etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
270         etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
271         for (i = 0; i < drvdata->nr_addr_cmp; i++) {
272                 etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
273                 etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
274         }
275         for (i = 0; i < drvdata->nr_cntr; i++) {
276                 etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
277                 etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
278                 etm_writel(drvdata, drvdata->cntr_rld_event[i],
279                            ETMCNTRLDEVRn(i));
280                 etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
281         }
282         etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
283         etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
284         etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
285         etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
286         etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
287         etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
288         etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
289         for (i = 0; i < drvdata->nr_ext_out; i++)
290                 etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
291         for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
292                 etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
293         etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
294         etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
295         /* No external input selected */
296         etm_writel(drvdata, 0x0, ETMEXTINSELR);
297         etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
298         /* No auxiliary control selected */
299         etm_writel(drvdata, 0x0, ETMAUXCR);
300         etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
301         /* No VMID comparator value selected */
302         etm_writel(drvdata, 0x0, ETMVMIDCVR);
303
304         /* Ensures trace output is enabled from this ETM */
305         etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
306
307         etm_clr_prog(drvdata);
308         CS_LOCK(drvdata->base);
309
310         dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
311 }
312
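/*
 * Return the cached trace ID when the ETM is not enabled; otherwise read
 * ETMTRACEIDR directly, relying on the caller (status_show()) to have
 * already unlocked and powered the device.
 */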
313 static int etm_trace_id_simple(struct etm_drvdata *drvdata)
314 {
315         if (!drvdata->enable)
316                 return drvdata->traceid;
317
318         return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
319 }
320
321 static int etm_trace_id(struct coresight_device *csdev)
322 {
323         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
324         unsigned long flags;
325         int trace_id = -1;
326
327         if (!drvdata->enable)
328                 return drvdata->traceid;
329         pm_runtime_get_sync(csdev->dev.parent);
330
331         spin_lock_irqsave(&drvdata->spinlock, flags);
332
333         CS_UNLOCK(drvdata->base);
334         trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
335         CS_LOCK(drvdata->base);
336
337         spin_unlock_irqrestore(&drvdata->spinlock, flags);
338         pm_runtime_put(csdev->dev.parent);
339
340         return trace_id;
341 }
342
343 static int etm_enable(struct coresight_device *csdev)
344 {
345         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
346         int ret;
347
348         pm_runtime_get_sync(csdev->dev.parent);
349         spin_lock(&drvdata->spinlock);
350
351         /*
352          * Configure the ETM only if the CPU is online.  If it isn't online,
353          * hw configuration will take place when 'CPU_STARTING' is received
354          * in etm_cpu_callback().
355          */
356         if (cpu_online(drvdata->cpu)) {
357                 ret = smp_call_function_single(drvdata->cpu,
358                                                etm_enable_hw, drvdata, 1);
359                 if (ret)
360                         goto err;
361         }
362
363         drvdata->enable = true;
364         drvdata->sticky_enable = true;
365
366         spin_unlock(&drvdata->spinlock);
367
368         dev_info(drvdata->dev, "ETM tracing enabled\n");
369         return 0;
370 err:
371         spin_unlock(&drvdata->spinlock);
372         pm_runtime_put(csdev->dev.parent);
373         return ret;
374 }
375
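/*
 * Counterpart of etm_enable_hw(), also run on the CPU owning the ETM: stop
 * trace generation with an always-false enable event, save the sequencer
 * and counter state for post trace analysis, then power the unit back down.
 */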
376 static void etm_disable_hw(void *info)
377 {
378         int i;
379         struct etm_drvdata *drvdata = info;
380
381         CS_UNLOCK(drvdata->base);
382         etm_set_prog(drvdata);
383
384         /* Program trace enable to low by using always false event */
385         etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
386
387         /* Read back sequencer and counters for post trace analysis */
388         drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
389
390         for (i = 0; i < drvdata->nr_cntr; i++)
391                 drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
392
393         etm_set_pwrdwn(drvdata);
394         CS_LOCK(drvdata->base);
395
396         dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
397 }
398
399 static void etm_disable(struct coresight_device *csdev)
400 {
401         struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
402
403         /*
404          * Taking the hotplug lock here protects against the clocks being
405          * disabled while tracing is still on (crash scenario) if a user
406          * disable occurs after the cpu online mask shows the cpu as offline
407          * but before the DYING hotplug callback is serviced by the driver.
408          */
409         get_online_cpus();
410         spin_lock(&drvdata->spinlock);
411
412         /*
413          * Executing etm_disable_hw on the cpu whose ETM is being disabled
414          * ensures that register writes occur when cpu is powered.
415          */
416         smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
417         drvdata->enable = false;
418
419         spin_unlock(&drvdata->spinlock);
420         put_online_cpus();
421         pm_runtime_put(csdev->dev.parent);
422
423         dev_info(drvdata->dev, "ETM tracing disabled\n");
424 }
425
426 static const struct coresight_ops_source etm_source_ops = {
427         .trace_id       = etm_trace_id,
428         .enable         = etm_enable,
429         .disable        = etm_disable,
430 };
431
432 static const struct coresight_ops etm_cs_ops = {
433         .source_ops     = &etm_source_ops,
434 };
435
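/*
 * sysfs attributes: the store handlers below only update the shadow
 * configuration held in etm_drvdata (under drvdata->spinlock whenever an
 * index is involved); the hardware picks the new values up the next time
 * it is programmed by etm_enable_hw().
 */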
436 static ssize_t nr_addr_cmp_show(struct device *dev,
437                                 struct device_attribute *attr, char *buf)
438 {
439         unsigned long val;
440         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
441
442         val = drvdata->nr_addr_cmp;
443         return sprintf(buf, "%#lx\n", val);
444 }
445 static DEVICE_ATTR_RO(nr_addr_cmp);
446
447 static ssize_t nr_cntr_show(struct device *dev,
448                             struct device_attribute *attr, char *buf)
449 {
        unsigned long val;
450         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
451
452         val = drvdata->nr_cntr;
453         return sprintf(buf, "%#lx\n", val);
454 }
455 static DEVICE_ATTR_RO(nr_cntr);
456
457 static ssize_t nr_ctxid_cmp_show(struct device *dev,
458                                  struct device_attribute *attr, char *buf)
459 {
460         unsigned long val;
461         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
462
463         val = drvdata->nr_ctxid_cmp;
464         return sprintf(buf, "%#lx\n", val);
465 }
466 static DEVICE_ATTR_RO(nr_ctxid_cmp);
467
468 static ssize_t etmsr_show(struct device *dev,
469                           struct device_attribute *attr, char *buf)
470 {
471         unsigned long flags, val;
472         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
473
474         pm_runtime_get_sync(drvdata->dev);
475         spin_lock_irqsave(&drvdata->spinlock, flags);
476         CS_UNLOCK(drvdata->base);
477
478         val = etm_readl(drvdata, ETMSR);
479
480         CS_LOCK(drvdata->base);
481         spin_unlock_irqrestore(&drvdata->spinlock, flags);
482         pm_runtime_put(drvdata->dev);
483
484         return sprintf(buf, "%#lx\n", val);
485 }
486 static DEVICE_ATTR_RO(etmsr);
487
488 static ssize_t reset_store(struct device *dev,
489                            struct device_attribute *attr,
490                            const char *buf, size_t size)
491 {
492         int i, ret;
493         unsigned long val;
494         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
495
496         ret = kstrtoul(buf, 16, &val);
497         if (ret)
498                 return ret;
499
500         if (val) {
501                 spin_lock(&drvdata->spinlock);
502                 drvdata->mode = ETM_MODE_EXCLUDE;
503                 drvdata->ctrl = 0x0;
504                 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
505                 drvdata->startstop_ctrl = 0x0;
506                 drvdata->addr_idx = 0x0;
507                 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
508                         drvdata->addr_val[i] = 0x0;
509                         drvdata->addr_acctype[i] = 0x0;
510                         drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
511                 }
512                 drvdata->cntr_idx = 0x0;
513
514                 etm_set_default(drvdata);
515                 spin_unlock(&drvdata->spinlock);
516         }
517
518         return size;
519 }
520 static DEVICE_ATTR_WO(reset);
521
522 static ssize_t mode_show(struct device *dev,
523                          struct device_attribute *attr, char *buf)
524 {
525         unsigned long val;
526         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
527
528         val = drvdata->mode;
529         return sprintf(buf, "%#lx\n", val);
530 }
531
532 static ssize_t mode_store(struct device *dev,
533                           struct device_attribute *attr,
534                           const char *buf, size_t size)
535 {
536         int ret;
537         unsigned long val;
538         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
539
540         ret = kstrtoul(buf, 16, &val);
541         if (ret)
542                 return ret;
543
544         spin_lock(&drvdata->spinlock);
545         drvdata->mode = val & ETM_MODE_ALL;
546
547         if (drvdata->mode & ETM_MODE_EXCLUDE)
548                 drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
549         else
550                 drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
551
552         if (drvdata->mode & ETM_MODE_CYCACC)
553                 drvdata->ctrl |= ETMCR_CYC_ACC;
554         else
555                 drvdata->ctrl &= ~ETMCR_CYC_ACC;
556
557         if (drvdata->mode & ETM_MODE_STALL) {
558                 if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
559                         dev_warn(drvdata->dev, "stall mode not supported\n");
560                         ret = -EINVAL;
561                         goto err_unlock;
562                 }
563                 drvdata->ctrl |= ETMCR_STALL_MODE;
564         } else
565                 drvdata->ctrl &= ~ETMCR_STALL_MODE;
566
567         if (drvdata->mode & ETM_MODE_TIMESTAMP) {
568                 if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
569                         dev_warn(drvdata->dev, "timestamp not supported\n");
570                         ret = -EINVAL;
571                         goto err_unlock;
572                 }
573                 drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
574         } else
575                 drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
576
577         if (drvdata->mode & ETM_MODE_CTXID)
578                 drvdata->ctrl |= ETMCR_CTXID_SIZE;
579         else
580                 drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
581         spin_unlock(&drvdata->spinlock);
582
583         return size;
584
585 err_unlock:
586         spin_unlock(&drvdata->spinlock);
587         return ret;
588 }
589 static DEVICE_ATTR_RW(mode);
590
591 static ssize_t trigger_event_show(struct device *dev,
592                                   struct device_attribute *attr, char *buf)
593 {
594         unsigned long val;
595         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
596
597         val = drvdata->trigger_event;
598         return sprintf(buf, "%#lx\n", val);
599 }
600
601 static ssize_t trigger_event_store(struct device *dev,
602                                    struct device_attribute *attr,
603                                    const char *buf, size_t size)
604 {
605         int ret;
606         unsigned long val;
607         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
608
609         ret = kstrtoul(buf, 16, &val);
610         if (ret)
611                 return ret;
612
613         drvdata->trigger_event = val & ETM_EVENT_MASK;
614
615         return size;
616 }
617 static DEVICE_ATTR_RW(trigger_event);
618
619 static ssize_t enable_event_show(struct device *dev,
620                                  struct device_attribute *attr, char *buf)
621 {
622         unsigned long val;
623         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
624
625         val = drvdata->enable_event;
626         return sprintf(buf, "%#lx\n", val);
627 }
628
629 static ssize_t enable_event_store(struct device *dev,
630                                   struct device_attribute *attr,
631                                   const char *buf, size_t size)
632 {
633         int ret;
634         unsigned long val;
635         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
636
637         ret = kstrtoul(buf, 16, &val);
638         if (ret)
639                 return ret;
640
641         drvdata->enable_event = val & ETM_EVENT_MASK;
642
643         return size;
644 }
645 static DEVICE_ATTR_RW(enable_event);
646
647 static ssize_t fifofull_level_show(struct device *dev,
648                                    struct device_attribute *attr, char *buf)
649 {
650         unsigned long val;
651         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
652
653         val = drvdata->fifofull_level;
654         return sprintf(buf, "%#lx\n", val);
655 }
656
657 static ssize_t fifofull_level_store(struct device *dev,
658                                     struct device_attribute *attr,
659                                     const char *buf, size_t size)
660 {
661         int ret;
662         unsigned long val;
663         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
664
665         ret = kstrtoul(buf, 16, &val);
666         if (ret)
667                 return ret;
668
669         drvdata->fifofull_level = val;
670
671         return size;
672 }
673 static DEVICE_ATTR_RW(fifofull_level);
674
675 static ssize_t addr_idx_show(struct device *dev,
676                              struct device_attribute *attr, char *buf)
677 {
678         unsigned long val;
679         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
680
681         val = drvdata->addr_idx;
682         return sprintf(buf, "%#lx\n", val);
683 }
684
685 static ssize_t addr_idx_store(struct device *dev,
686                               struct device_attribute *attr,
687                               const char *buf, size_t size)
688 {
689         int ret;
690         unsigned long val;
691         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
692
693         ret = kstrtoul(buf, 16, &val);
694         if (ret)
695                 return ret;
696
697         if (val >= drvdata->nr_addr_cmp)
698                 return -EINVAL;
699
700         /*
701          * Use spinlock to ensure index doesn't change while it gets
702          * dereferenced multiple times within a spinlock block elsewhere.
703          */
704         spin_lock(&drvdata->spinlock);
705         drvdata->addr_idx = val;
706         spin_unlock(&drvdata->spinlock);
707
708         return size;
709 }
710 static DEVICE_ATTR_RW(addr_idx);
711
712 static ssize_t addr_single_show(struct device *dev,
713                                 struct device_attribute *attr, char *buf)
714 {
715         u8 idx;
716         unsigned long val;
717         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
718
719         spin_lock(&drvdata->spinlock);
720         idx = drvdata->addr_idx;
721         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
722               drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
723                 spin_unlock(&drvdata->spinlock);
724                 return -EINVAL;
725         }
726
727         val = drvdata->addr_val[idx];
728         spin_unlock(&drvdata->spinlock);
729
730         return sprintf(buf, "%#lx\n", val);
731 }
732
733 static ssize_t addr_single_store(struct device *dev,
734                                  struct device_attribute *attr,
735                                  const char *buf, size_t size)
736 {
737         u8 idx;
738         int ret;
739         unsigned long val;
740         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
741
742         ret = kstrtoul(buf, 16, &val);
743         if (ret)
744                 return ret;
745
746         spin_lock(&drvdata->spinlock);
747         idx = drvdata->addr_idx;
748         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
749               drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
750                 spin_unlock(&drvdata->spinlock);
751                 return -EINVAL;
752         }
753
754         drvdata->addr_val[idx] = val;
755         drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
756         spin_unlock(&drvdata->spinlock);
757
758         return size;
759 }
760 static DEVICE_ATTR_RW(addr_single);
761
762 static ssize_t addr_range_show(struct device *dev,
763                                struct device_attribute *attr, char *buf)
764 {
765         u8 idx;
766         unsigned long val1, val2;
767         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
768
769         spin_lock(&drvdata->spinlock);
770         idx = drvdata->addr_idx;
771         if (idx % 2 != 0) {
772                 spin_unlock(&drvdata->spinlock);
773                 return -EPERM;
774         }
775         if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
776                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
777               (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
778                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
779                 spin_unlock(&drvdata->spinlock);
780                 return -EPERM;
781         }
782
783         val1 = drvdata->addr_val[idx];
784         val2 = drvdata->addr_val[idx + 1];
785         spin_unlock(&drvdata->spinlock);
786
787         return sprintf(buf, "%#lx %#lx\n", val1, val2);
788 }
789
790 static ssize_t addr_range_store(struct device *dev,
791                               struct device_attribute *attr,
792                               const char *buf, size_t size)
793 {
794         u8 idx;
795         unsigned long val1, val2;
796         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
797
798         if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
799                 return -EINVAL;
800         /* Lower address comparator cannot have a higher address value */
801         if (val1 > val2)
802                 return -EINVAL;
803
804         spin_lock(&drvdata->spinlock);
805         idx = drvdata->addr_idx;
806         if (idx % 2 != 0) {
807                 spin_unlock(&drvdata->spinlock);
808                 return -EPERM;
809         }
810         if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
811                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
812               (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
813                drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
814                 spin_unlock(&drvdata->spinlock);
815                 return -EPERM;
816         }
817
818         drvdata->addr_val[idx] = val1;
819         drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
820         drvdata->addr_val[idx + 1] = val2;
821         drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
822         drvdata->enable_ctrl1 |= (1 << (idx/2));
823         spin_unlock(&drvdata->spinlock);
824
825         return size;
826 }
827 static DEVICE_ATTR_RW(addr_range);
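/*
 * Illustrative use of the address comparator attributes from user space
 * (device name and addresses below are examples only):
 *
 *   cd /sys/bus/coresight/devices/<etm-device>
 *   echo 0 > addr_idx                        # select comparator pair 0/1
 *   echo 'c0008000 c0800000' > addr_range    # restrict tracing to a range
 */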
828
829 static ssize_t addr_start_show(struct device *dev,
830                                struct device_attribute *attr, char *buf)
831 {
832         u8 idx;
833         unsigned long val;
834         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
835
836         spin_lock(&drvdata->spinlock);
837         idx = drvdata->addr_idx;
838         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
839               drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
840                 spin_unlock(&drvdata->spinlock);
841                 return -EPERM;
842         }
843
844         val = drvdata->addr_val[idx];
845         spin_unlock(&drvdata->spinlock);
846
847         return sprintf(buf, "%#lx\n", val);
848 }
849
850 static ssize_t addr_start_store(struct device *dev,
851                                 struct device_attribute *attr,
852                                 const char *buf, size_t size)
853 {
854         u8 idx;
855         int ret;
856         unsigned long val;
857         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
858
859         ret = kstrtoul(buf, 16, &val);
860         if (ret)
861                 return ret;
862
863         spin_lock(&drvdata->spinlock);
864         idx = drvdata->addr_idx;
865         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
866               drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
867                 spin_unlock(&drvdata->spinlock);
868                 return -EPERM;
869         }
870
871         drvdata->addr_val[idx] = val;
872         drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
873         drvdata->startstop_ctrl |= (1 << idx);
874         drvdata->enable_ctrl1 |= BIT(25);
875         spin_unlock(&drvdata->spinlock);
876
877         return size;
878 }
879 static DEVICE_ATTR_RW(addr_start);
880
881 static ssize_t addr_stop_show(struct device *dev,
882                               struct device_attribute *attr, char *buf)
883 {
884         u8 idx;
885         unsigned long val;
886         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
887
888         spin_lock(&drvdata->spinlock);
889         idx = drvdata->addr_idx;
890         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
891               drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
892                 spin_unlock(&drvdata->spinlock);
893                 return -EPERM;
894         }
895
896         val = drvdata->addr_val[idx];
897         spin_unlock(&drvdata->spinlock);
898
899         return sprintf(buf, "%#lx\n", val);
900 }
901
902 static ssize_t addr_stop_store(struct device *dev,
903                                struct device_attribute *attr,
904                                const char *buf, size_t size)
905 {
906         u8 idx;
907         int ret;
908         unsigned long val;
909         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
910
911         ret = kstrtoul(buf, 16, &val);
912         if (ret)
913                 return ret;
914
915         spin_lock(&drvdata->spinlock);
916         idx = drvdata->addr_idx;
917         if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
918               drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
919                 spin_unlock(&drvdata->spinlock);
920                 return -EPERM;
921         }
922
923         drvdata->addr_val[idx] = val;
924         drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
925         drvdata->startstop_ctrl |= (1 << (idx + 16));
926         drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
927         spin_unlock(&drvdata->spinlock);
928
929         return size;
930 }
931 static DEVICE_ATTR_RW(addr_stop);
932
933 static ssize_t addr_acctype_show(struct device *dev,
934                                  struct device_attribute *attr, char *buf)
935 {
936         unsigned long val;
937         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
938
939         spin_lock(&drvdata->spinlock);
940         val = drvdata->addr_acctype[drvdata->addr_idx];
941         spin_unlock(&drvdata->spinlock);
942
943         return sprintf(buf, "%#lx\n", val);
944 }
945
946 static ssize_t addr_acctype_store(struct device *dev,
947                                   struct device_attribute *attr,
948                                   const char *buf, size_t size)
949 {
950         int ret;
951         unsigned long val;
952         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
953
954         ret = kstrtoul(buf, 16, &val);
955         if (ret)
956                 return ret;
957
958         spin_lock(&drvdata->spinlock);
959         drvdata->addr_acctype[drvdata->addr_idx] = val;
960         spin_unlock(&drvdata->spinlock);
961
962         return size;
963 }
964 static DEVICE_ATTR_RW(addr_acctype);
965
966 static ssize_t cntr_idx_show(struct device *dev,
967                              struct device_attribute *attr, char *buf)
968 {
969         unsigned long val;
970         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
971
972         val = drvdata->cntr_idx;
973         return sprintf(buf, "%#lx\n", val);
974 }
975
976 static ssize_t cntr_idx_store(struct device *dev,
977                               struct device_attribute *attr,
978                               const char *buf, size_t size)
979 {
980         int ret;
981         unsigned long val;
982         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
983
984         ret = kstrtoul(buf, 16, &val);
985         if (ret)
986                 return ret;
987
988         if (val >= drvdata->nr_cntr)
989                 return -EINVAL;
990         /*
991          * Use spinlock to ensure index doesn't change while it gets
992          * dereferenced multiple times within a spinlock block elsewhere.
993          */
994         spin_lock(&drvdata->spinlock);
995         drvdata->cntr_idx = val;
996         spin_unlock(&drvdata->spinlock);
997
998         return size;
999 }
1000 static DEVICE_ATTR_RW(cntr_idx);
1001
1002 static ssize_t cntr_rld_val_show(struct device *dev,
1003                                  struct device_attribute *attr, char *buf)
1004 {
1005         unsigned long val;
1006         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1007
1008         spin_lock(&drvdata->spinlock);
1009         val = drvdata->cntr_rld_val[drvdata->cntr_idx];
1010         spin_unlock(&drvdata->spinlock);
1011
1012         return sprintf(buf, "%#lx\n", val);
1013 }
1014
1015 static ssize_t cntr_rld_val_store(struct device *dev,
1016                                   struct device_attribute *attr,
1017                                   const char *buf, size_t size)
1018 {
1019         int ret;
1020         unsigned long val;
1021         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1022
1023         ret = kstrtoul(buf, 16, &val);
1024         if (ret)
1025                 return ret;
1026
1027         spin_lock(&drvdata->spinlock);
1028         drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
1029         spin_unlock(&drvdata->spinlock);
1030
1031         return size;
1032 }
1033 static DEVICE_ATTR_RW(cntr_rld_val);
1034
1035 static ssize_t cntr_event_show(struct device *dev,
1036                                struct device_attribute *attr, char *buf)
1037 {
1038         unsigned long val;
1039         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1040
1041         spin_lock(&drvdata->spinlock);
1042         val = drvdata->cntr_event[drvdata->cntr_idx];
1043         spin_unlock(&drvdata->spinlock);
1044
1045         return sprintf(buf, "%#lx\n", val);
1046 }
1047
1048 static ssize_t cntr_event_store(struct device *dev,
1049                                 struct device_attribute *attr,
1050                                 const char *buf, size_t size)
1051 {
1052         int ret;
1053         unsigned long val;
1054         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1055
1056         ret = kstrtoul(buf, 16, &val);
1057         if (ret)
1058                 return ret;
1059
1060         spin_lock(&drvdata->spinlock);
1061         drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1062         spin_unlock(&drvdata->spinlock);
1063
1064         return size;
1065 }
1066 static DEVICE_ATTR_RW(cntr_event);
1067
1068 static ssize_t cntr_rld_event_show(struct device *dev,
1069                                    struct device_attribute *attr, char *buf)
1070 {
1071         unsigned long val;
1072         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1073
1074         spin_lock(&drvdata->spinlock);
1075         val = drvdata->cntr_rld_event[drvdata->cntr_idx];
1076         spin_unlock(&drvdata->spinlock);
1077
1078         return sprintf(buf, "%#lx\n", val);
1079 }
1080
1081 static ssize_t cntr_rld_event_store(struct device *dev,
1082                                     struct device_attribute *attr,
1083                                     const char *buf, size_t size)
1084 {
1085         int ret;
1086         unsigned long val;
1087         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1088
1089         ret = kstrtoul(buf, 16, &val);
1090         if (ret)
1091                 return ret;
1092
1093         spin_lock(&drvdata->spinlock);
1094         drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1095         spin_unlock(&drvdata->spinlock);
1096
1097         return size;
1098 }
1099 static DEVICE_ATTR_RW(cntr_rld_event);
1100
1101 static ssize_t cntr_val_show(struct device *dev,
1102                              struct device_attribute *attr, char *buf)
1103 {
1104         int i, ret = 0;
1105         u32 val;
1106         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1107
1108         if (!drvdata->enable) {
1109                 spin_lock(&drvdata->spinlock);
1110                 for (i = 0; i < drvdata->nr_cntr; i++)
1111                         ret += sprintf(buf + ret, "counter %d: %x\n",
1112                                        i, drvdata->cntr_val[i]);
1113                 spin_unlock(&drvdata->spinlock);
1114                 return ret;
1115         }
1116
1117         for (i = 0; i < drvdata->nr_cntr; i++) {
1118                 val = etm_readl(drvdata, ETMCNTVRn(i));
1119                 ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
1120         }
1121
1122         return ret;
1123 }
1124
1125 static ssize_t cntr_val_store(struct device *dev,
1126                               struct device_attribute *attr,
1127                               const char *buf, size_t size)
1128 {
1129         int ret;
1130         unsigned long val;
1131         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1132
1133         ret = kstrtoul(buf, 16, &val);
1134         if (ret)
1135                 return ret;
1136
1137         spin_lock(&drvdata->spinlock);
1138         drvdata->cntr_val[drvdata->cntr_idx] = val;
1139         spin_unlock(&drvdata->spinlock);
1140
1141         return size;
1142 }
1143 static DEVICE_ATTR_RW(cntr_val);
1144
1145 static ssize_t seq_12_event_show(struct device *dev,
1146                                  struct device_attribute *attr, char *buf)
1147 {
1148         unsigned long val;
1149         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1150
1151         val = drvdata->seq_12_event;
1152         return sprintf(buf, "%#lx\n", val);
1153 }
1154
1155 static ssize_t seq_12_event_store(struct device *dev,
1156                                   struct device_attribute *attr,
1157                                   const char *buf, size_t size)
1158 {
1159         int ret;
1160         unsigned long val;
1161         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1162
1163         ret = kstrtoul(buf, 16, &val);
1164         if (ret)
1165                 return ret;
1166
1167         drvdata->seq_12_event = val & ETM_EVENT_MASK;
1168         return size;
1169 }
1170 static DEVICE_ATTR_RW(seq_12_event);
1171
1172 static ssize_t seq_21_event_show(struct device *dev,
1173                                  struct device_attribute *attr, char *buf)
1174 {
1175         unsigned long val;
1176         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1177
1178         val = drvdata->seq_21_event;
1179         return sprintf(buf, "%#lx\n", val);
1180 }
1181
1182 static ssize_t seq_21_event_store(struct device *dev,
1183                                   struct device_attribute *attr,
1184                                   const char *buf, size_t size)
1185 {
1186         int ret;
1187         unsigned long val;
1188         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1189
1190         ret = kstrtoul(buf, 16, &val);
1191         if (ret)
1192                 return ret;
1193
1194         drvdata->seq_21_event = val & ETM_EVENT_MASK;
1195         return size;
1196 }
1197 static DEVICE_ATTR_RW(seq_21_event);
1198
1199 static ssize_t seq_23_event_show(struct device *dev,
1200                                  struct device_attribute *attr, char *buf)
1201 {
1202         unsigned long val;
1203         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1204
1205         val = drvdata->seq_23_event;
1206         return sprintf(buf, "%#lx\n", val);
1207 }
1208
1209 static ssize_t seq_23_event_store(struct device *dev,
1210                                   struct device_attribute *attr,
1211                                   const char *buf, size_t size)
1212 {
1213         int ret;
1214         unsigned long val;
1215         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1216
1217         ret = kstrtoul(buf, 16, &val);
1218         if (ret)
1219                 return ret;
1220
1221         drvdata->seq_23_event = val & ETM_EVENT_MASK;
1222         return size;
1223 }
1224 static DEVICE_ATTR_RW(seq_23_event);
1225
1226 static ssize_t seq_31_event_show(struct device *dev,
1227                                  struct device_attribute *attr, char *buf)
1228 {
1229         unsigned long val;
1230         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1231
1232         val = drvdata->seq_31_event;
1233         return sprintf(buf, "%#lx\n", val);
1234 }
1235
1236 static ssize_t seq_31_event_store(struct device *dev,
1237                                   struct device_attribute *attr,
1238                                   const char *buf, size_t size)
1239 {
1240         int ret;
1241         unsigned long val;
1242         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1243
1244         ret = kstrtoul(buf, 16, &val);
1245         if (ret)
1246                 return ret;
1247
1248         drvdata->seq_31_event = val & ETM_EVENT_MASK;
1249         return size;
1250 }
1251 static DEVICE_ATTR_RW(seq_31_event);
1252
1253 static ssize_t seq_32_event_show(struct device *dev,
1254                                  struct device_attribute *attr, char *buf)
1255 {
1256         unsigned long val;
1257         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1258
1259         val = drvdata->seq_32_event;
1260         return sprintf(buf, "%#lx\n", val);
1261 }
1262
1263 static ssize_t seq_32_event_store(struct device *dev,
1264                                   struct device_attribute *attr,
1265                                   const char *buf, size_t size)
1266 {
1267         int ret;
1268         unsigned long val;
1269         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1270
1271         ret = kstrtoul(buf, 16, &val);
1272         if (ret)
1273                 return ret;
1274
1275         drvdata->seq_32_event = val & ETM_EVENT_MASK;
1276         return size;
1277 }
1278 static DEVICE_ATTR_RW(seq_32_event);
1279
1280 static ssize_t seq_13_event_show(struct device *dev,
1281                                  struct device_attribute *attr, char *buf)
1282 {
1283         unsigned long val;
1284         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1285
1286         val = drvdata->seq_13_event;
1287         return sprintf(buf, "%#lx\n", val);
1288 }
1289
1290 static ssize_t seq_13_event_store(struct device *dev,
1291                                   struct device_attribute *attr,
1292                                   const char *buf, size_t size)
1293 {
1294         int ret;
1295         unsigned long val;
1296         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1297
1298         ret = kstrtoul(buf, 16, &val);
1299         if (ret)
1300                 return ret;
1301
1302         drvdata->seq_13_event = val & ETM_EVENT_MASK;
1303         return size;
1304 }
1305 static DEVICE_ATTR_RW(seq_13_event);
1306
1307 static ssize_t seq_curr_state_show(struct device *dev,
1308                                    struct device_attribute *attr, char *buf)
1309 {
1310         unsigned long val, flags;
1311         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1312
1313         if (!drvdata->enable) {
1314                 val = drvdata->seq_curr_state;
1315                 goto out;
1316         }
1317
1318         pm_runtime_get_sync(drvdata->dev);
1319         spin_lock_irqsave(&drvdata->spinlock, flags);
1320
1321         CS_UNLOCK(drvdata->base);
1322         val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
1323         CS_LOCK(drvdata->base);
1324
1325         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1326         pm_runtime_put(drvdata->dev);
1327 out:
1328         return sprintf(buf, "%#lx\n", val);
1329 }
1330
1331 static ssize_t seq_curr_state_store(struct device *dev,
1332                                     struct device_attribute *attr,
1333                                     const char *buf, size_t size)
1334 {
1335         int ret;
1336         unsigned long val;
1337         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1338
1339         ret = kstrtoul(buf, 16, &val);
1340         if (ret)
1341                 return ret;
1342
1343         if (val > ETM_SEQ_STATE_MAX_VAL)
1344                 return -EINVAL;
1345
1346         drvdata->seq_curr_state = val;
1347
1348         return size;
1349 }
1350 static DEVICE_ATTR_RW(seq_curr_state);
1351
1352 static ssize_t ctxid_idx_show(struct device *dev,
1353                               struct device_attribute *attr, char *buf)
1354 {
1355         unsigned long val;
1356         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1357
1358         val = drvdata->ctxid_idx;
1359         return sprintf(buf, "%#lx\n", val);
1360 }
1361
1362 static ssize_t ctxid_idx_store(struct device *dev,
1363                                 struct device_attribute *attr,
1364                                 const char *buf, size_t size)
1365 {
1366         int ret;
1367         unsigned long val;
1368         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1369
1370         ret = kstrtoul(buf, 16, &val);
1371         if (ret)
1372                 return ret;
1373
1374         if (val >= drvdata->nr_ctxid_cmp)
1375                 return -EINVAL;
1376
1377         /*
1378          * Use spinlock to ensure index doesn't change while it gets
1379          * dereferenced multiple times within a spinlock block elsewhere.
1380          */
1381         spin_lock(&drvdata->spinlock);
1382         drvdata->ctxid_idx = val;
1383         spin_unlock(&drvdata->spinlock);
1384
1385         return size;
1386 }
1387 static DEVICE_ATTR_RW(ctxid_idx);
1388
1389 static ssize_t ctxid_val_show(struct device *dev,
1390                               struct device_attribute *attr, char *buf)
1391 {
1392         unsigned long val;
1393         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1394
1395         spin_lock(&drvdata->spinlock);
1396         val = drvdata->ctxid_val[drvdata->ctxid_idx];
1397         spin_unlock(&drvdata->spinlock);
1398
1399         return sprintf(buf, "%#lx\n", val);
1400 }
1401
1402 static ssize_t ctxid_val_store(struct device *dev,
1403                                struct device_attribute *attr,
1404                                const char *buf, size_t size)
1405 {
1406         int ret;
1407         unsigned long val;
1408         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1409
1410         ret = kstrtoul(buf, 16, &val);
1411         if (ret)
1412                 return ret;
1413
1414         spin_lock(&drvdata->spinlock);
1415         drvdata->ctxid_val[drvdata->ctxid_idx] = val;
1416         spin_unlock(&drvdata->spinlock);
1417
1418         return size;
1419 }
1420 static DEVICE_ATTR_RW(ctxid_val);
1421
1422 static ssize_t ctxid_mask_show(struct device *dev,
1423                                struct device_attribute *attr, char *buf)
1424 {
1425         unsigned long val;
1426         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1427
1428         val = drvdata->ctxid_mask;
1429         return sprintf(buf, "%#lx\n", val);
1430 }
1431
1432 static ssize_t ctxid_mask_store(struct device *dev,
1433                                 struct device_attribute *attr,
1434                                 const char *buf, size_t size)
1435 {
1436         int ret;
1437         unsigned long val;
1438         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1439
1440         ret = kstrtoul(buf, 16, &val);
1441         if (ret)
1442                 return ret;
1443
1444         drvdata->ctxid_mask = val;
1445         return size;
1446 }
1447 static DEVICE_ATTR_RW(ctxid_mask);
1448
1449 static ssize_t sync_freq_show(struct device *dev,
1450                               struct device_attribute *attr, char *buf)
1451 {
1452         unsigned long val;
1453         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1454
1455         val = drvdata->sync_freq;
1456         return sprintf(buf, "%#lx\n", val);
1457 }
1458
1459 static ssize_t sync_freq_store(struct device *dev,
1460                                struct device_attribute *attr,
1461                                const char *buf, size_t size)
1462 {
1463         int ret;
1464         unsigned long val;
1465         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1466
1467         ret = kstrtoul(buf, 16, &val);
1468         if (ret)
1469                 return ret;
1470
1471         drvdata->sync_freq = val & ETM_SYNC_MASK;
1472         return size;
1473 }
1474 static DEVICE_ATTR_RW(sync_freq);
1475
1476 static ssize_t timestamp_event_show(struct device *dev,
1477                                     struct device_attribute *attr, char *buf)
1478 {
1479         unsigned long val;
1480         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1481
1482         val = drvdata->timestamp_event;
1483         return sprintf(buf, "%#lx\n", val);
1484 }
1485
1486 static ssize_t timestamp_event_store(struct device *dev,
1487                                      struct device_attribute *attr,
1488                                      const char *buf, size_t size)
1489 {
1490         int ret;
1491         unsigned long val;
1492         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1493
1494         ret = kstrtoul(buf, 16, &val);
1495         if (ret)
1496                 return ret;
1497
1498         drvdata->timestamp_event = val & ETM_EVENT_MASK;
1499         return size;
1500 }
1501 static DEVICE_ATTR_RW(timestamp_event);
1502
1503 static ssize_t status_show(struct device *dev,
1504                            struct device_attribute *attr, char *buf)
1505 {
1506         int ret;
1507         unsigned long flags;
1508         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1509
1510         pm_runtime_get_sync(drvdata->dev);
1511         spin_lock_irqsave(&drvdata->spinlock, flags);
1512
1513         CS_UNLOCK(drvdata->base);
1514         ret = sprintf(buf,
1515                       "ETMCCR: 0x%08x\n"
1516                       "ETMCCER: 0x%08x\n"
1517                       "ETMSCR: 0x%08x\n"
1518                       "ETMIDR: 0x%08x\n"
1519                       "ETMCR: 0x%08x\n"
1520                       "ETMTRACEIDR: 0x%08x\n"
1521                       "Enable event: 0x%08x\n"
1522                       "Enable start/stop: 0x%08x\n"
1523                       "Enable control: CR1 0x%08x CR2 0x%08x\n"
1524                       "CPU affinity: %d\n",
1525                       drvdata->etmccr, drvdata->etmccer,
1526                       etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
1527                       etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
1528                       etm_readl(drvdata, ETMTEEVR),
1529                       etm_readl(drvdata, ETMTSSCR),
1530                       etm_readl(drvdata, ETMTECR1),
1531                       etm_readl(drvdata, ETMTECR2),
1532                       drvdata->cpu);
1533         CS_LOCK(drvdata->base);
1534
1535         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1536         pm_runtime_put(drvdata->dev);
1537
1538         return ret;
1539 }
1540 static DEVICE_ATTR_RO(status);
1541
1542 static ssize_t traceid_show(struct device *dev,
1543                             struct device_attribute *attr, char *buf)
1544 {
1545         unsigned long val, flags;
1546         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1547
1548         if (!drvdata->enable) {
1549                 val = drvdata->traceid;
1550                 goto out;
1551         }
1552
1553         pm_runtime_get_sync(drvdata->dev);
1554         spin_lock_irqsave(&drvdata->spinlock, flags);
1555         CS_UNLOCK(drvdata->base);
1556
1557         val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
1558
1559         CS_LOCK(drvdata->base);
1560         spin_unlock_irqrestore(&drvdata->spinlock, flags);
1561         pm_runtime_put(drvdata->dev);
1562 out:
1563         return sprintf(buf, "%#lx\n", val);
1564 }
1565
1566 static ssize_t traceid_store(struct device *dev,
1567                              struct device_attribute *attr,
1568                              const char *buf, size_t size)
1569 {
1570         int ret;
1571         unsigned long val;
1572         struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1573
1574         ret = kstrtoul(buf, 16, &val);
1575         if (ret)
1576                 return ret;
1577
1578         drvdata->traceid = val & ETM_TRACEID_MASK;
1579         return size;
1580 }
1581 static DEVICE_ATTR_RW(traceid);
1582
1583 static struct attribute *coresight_etm_attrs[] = {
1584         &dev_attr_nr_addr_cmp.attr,
1585         &dev_attr_nr_cntr.attr,
1586         &dev_attr_nr_ctxid_cmp.attr,
1587         &dev_attr_etmsr.attr,
1588         &dev_attr_reset.attr,
1589         &dev_attr_mode.attr,
1590         &dev_attr_trigger_event.attr,
1591         &dev_attr_enable_event.attr,
1592         &dev_attr_fifofull_level.attr,
1593         &dev_attr_addr_idx.attr,
1594         &dev_attr_addr_single.attr,
1595         &dev_attr_addr_range.attr,
1596         &dev_attr_addr_start.attr,
1597         &dev_attr_addr_stop.attr,
1598         &dev_attr_addr_acctype.attr,
1599         &dev_attr_cntr_idx.attr,
1600         &dev_attr_cntr_rld_val.attr,
1601         &dev_attr_cntr_event.attr,
1602         &dev_attr_cntr_rld_event.attr,
1603         &dev_attr_cntr_val.attr,
1604         &dev_attr_seq_12_event.attr,
1605         &dev_attr_seq_21_event.attr,
1606         &dev_attr_seq_23_event.attr,
1607         &dev_attr_seq_31_event.attr,
1608         &dev_attr_seq_32_event.attr,
1609         &dev_attr_seq_13_event.attr,
1610         &dev_attr_seq_curr_state.attr,
1611         &dev_attr_ctxid_idx.attr,
1612         &dev_attr_ctxid_val.attr,
1613         &dev_attr_ctxid_mask.attr,
1614         &dev_attr_sync_freq.attr,
1615         &dev_attr_timestamp_event.attr,
1616         &dev_attr_status.attr,
1617         &dev_attr_traceid.attr,
1618         NULL,
1619 };
1620 ATTRIBUTE_GROUPS(coresight_etm);
1621
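/*
 * CPU hotplug notifier: on CPU_STARTING the OS lock is cleared (if not
 * already done) and the trace unit is reprogrammed if it was enabled,
 * on CPU_ONLINE a boot-enabled ETM that has not yet been used is turned
 * on through the CoreSight core, and on CPU_DYING the hardware is
 * disabled before the CPU goes away.
 */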
1622 static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
1623                             void *hcpu)
1624 {
1625         unsigned int cpu = (unsigned long)hcpu;
1626
1627         if (!etmdrvdata[cpu])
1628                 goto out;
1629
1630         switch (action & (~CPU_TASKS_FROZEN)) {
1631         case CPU_STARTING:
1632                 spin_lock(&etmdrvdata[cpu]->spinlock);
1633                 if (!etmdrvdata[cpu]->os_unlock) {
1634                         etm_os_unlock(etmdrvdata[cpu]);
1635                         etmdrvdata[cpu]->os_unlock = true;
1636                 }
1637
1638                 if (etmdrvdata[cpu]->enable)
1639                         etm_enable_hw(etmdrvdata[cpu]);
1640                 spin_unlock(&etmdrvdata[cpu]->spinlock);
1641                 break;
1642
1643         case CPU_ONLINE:
1644                 if (etmdrvdata[cpu]->boot_enable &&
1645                     !etmdrvdata[cpu]->sticky_enable)
1646                         coresight_enable(etmdrvdata[cpu]->csdev);
1647                 break;
1648
1649         case CPU_DYING:
1650                 spin_lock(&etmdrvdata[cpu]->spinlock);
1651                 if (etmdrvdata[cpu]->enable)
1652                         etm_disable_hw(etmdrvdata[cpu]);
1653                 spin_unlock(&etmdrvdata[cpu]->spinlock);
1654                 break;
1655         }
1656 out:
1657         return NOTIFY_OK;
1658 }
1659
1660 static struct notifier_block etm_cpu_notifier = {
1661         .notifier_call = etm_cpu_callback,
1662 };
1663
1664 static bool etm_arch_supported(u8 arch)
1665 {
1666         switch (arch) {
1667         case ETM_ARCH_V3_3:
1668                 break;
1669         case ETM_ARCH_V3_5:
1670                 break;
1671         case PFT_ARCH_V1_0:
1672                 break;
1673         case PFT_ARCH_V1_1:
1674                 break;
1675         default:
1676                 return false;
1677         }
1678         return true;
1679 }
1680
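/*
 * Runs on the CPU bound to this ETM (via smp_call_function_single()) to
 * discover the implementation: the architecture version, port size and
 * the number of address/context ID comparators, counters and external
 * inputs/outputs are read from ETMIDR, ETMCR, ETMCCER and ETMCCR and
 * cached in the driver data.
 */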
1681 static void etm_init_arch_data(void *info)
1682 {
1683         u32 etmidr;
1684         u32 etmccr;
1685         struct etm_drvdata *drvdata = info;
1686
1687         CS_UNLOCK(drvdata->base);
1688
1689         /* First dummy read */
1690         (void)etm_readl(drvdata, ETMPDSR);
1691         /* Provide power to ETM: ETMPDCR[3] == 1 */
1692         etm_set_pwrup(drvdata);
1693         /*
1694          * Clear the power down bit: while it is set, writes to certain
1695          * registers may be ignored.
1696          */
1697         etm_clr_pwrdwn(drvdata);
1698         /*
1699          * Set the prog bit. It should already be set out of reset, but
1700          * set it here explicitly to make sure.
1701          */
1702         etm_set_prog(drvdata);
1703
1704         /* Find all capabilities */
1705         etmidr = etm_readl(drvdata, ETMIDR);
1706         drvdata->arch = BMVAL(etmidr, 4, 11);
1707         drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
1708
1709         drvdata->etmccer = etm_readl(drvdata, ETMCCER);
1710         etmccr = etm_readl(drvdata, ETMCCR);
1711         drvdata->etmccr = etmccr;
1712         drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
1713         drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
1714         drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
1715         drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
1716         drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
1717
1718         etm_set_pwrdwn(drvdata);
1719         etm_clr_pwrup(drvdata);
1720         CS_LOCK(drvdata->base);
1721 }
1722
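/*
 * Pick a default configuration: a driver-unique trace ID, cycle accurate
 * tracing with timestamps, and - when at least one address range
 * comparator pair is implemented - an address range comparator covering
 * the kernel text (_stext to _etext).
 */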
1723 static void etm_init_default_data(struct etm_drvdata *drvdata)
1724 {
1725         /*
1726          * A trace ID of value 0 is invalid, so start at an arbitrary
1727          * non-zero value that fits in 7 bits.
1728          */
1729         static int etm3x_traceid = 0x10;
1730
1731         u32 flags = (1 << 0 | /* instruction execute */
1732                      3 << 3 | /* ARM instruction */
1733                      0 << 5 | /* No data value comparison */
1734                      0 << 7 | /* No exact match */
1735                      0 << 8 | /* Ignore context ID */
1736                      0 << 10); /* Security ignored */
1737
1738         /*
1739          * Initial configuration only - guarantees that sources handled
1740          * by this driver have a unique ID at startup, but not uniqueness
1741          * across other source types; for that we lean on the core
1742          * framework.
1743          */
1744         drvdata->traceid = etm3x_traceid++;
1745         drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
1746         drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
1747         if (drvdata->nr_addr_cmp >= 2) {
1748                 drvdata->addr_val[0] = (u32) _stext;
1749                 drvdata->addr_val[1] = (u32) _etext;
1750                 drvdata->addr_acctype[0] = flags;
1751                 drvdata->addr_acctype[1] = flags;
1752                 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
1753                 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
1754         }
1755
1756         etm_set_default(drvdata);
1757 }
1758
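/*
 * AMBA probe: map the device registers, grab the optional "atclk" clock,
 * cross-call the CPU that owns the tracer to unlock it and read its
 * capabilities, register a hotplug notifier for the first device, and
 * finally register with the CoreSight core.  With boot_enable set the
 * source is switched on immediately.
 */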
1759 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1760 {
1761         int ret;
1762         void __iomem *base;
1763         struct device *dev = &adev->dev;
1764         struct coresight_platform_data *pdata = NULL;
1765         struct etm_drvdata *drvdata;
1766         struct resource *res = &adev->res;
1767         struct coresight_desc *desc;
1768         struct device_node *np = adev->dev.of_node;
1769
1770         desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
1771         if (!desc)
1772                 return -ENOMEM;
1773
1774         drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
1775         if (!drvdata)
1776                 return -ENOMEM;
1777
1778         if (np) {
1779                 pdata = of_get_coresight_platform_data(dev, np);
1780                 if (IS_ERR(pdata))
1781                         return PTR_ERR(pdata);
1782
1783                 adev->dev.platform_data = pdata;
1784                 drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
1785         }
1786
1787         drvdata->dev = &adev->dev;
1788         dev_set_drvdata(dev, drvdata);
1789
1790         /* Validity for the resource is already checked by the AMBA core */
1791         base = devm_ioremap_resource(dev, res);
1792         if (IS_ERR(base))
1793                 return PTR_ERR(base);
1794
1795         drvdata->base = base;
1796
1797         spin_lock_init(&drvdata->spinlock);
1798
1799         drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
1800         if (!IS_ERR(drvdata->atclk)) {
1801                 ret = clk_prepare_enable(drvdata->atclk);
1802                 if (ret)
1803                         return ret;
1804         }
1805
1806         drvdata->cpu = pdata ? pdata->cpu : 0;
1807
1808         get_online_cpus();
1809         etmdrvdata[drvdata->cpu] = drvdata;
1810
1811         if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
1812                 drvdata->os_unlock = true;
1813
1814         if (smp_call_function_single(drvdata->cpu,
1815                                      etm_init_arch_data,  drvdata, 1))
1816                 dev_err(dev, "ETM arch init failed\n");
1817
1818         if (!etm_count++)
1819                 register_hotcpu_notifier(&etm_cpu_notifier);
1820
1821         put_online_cpus();
1822
1823         if (!etm_arch_supported(drvdata->arch)) {
1824                 ret = -EINVAL;
1825                 goto err_arch_supported;
1826         }
1827         etm_init_default_data(drvdata);
1828
1829         desc->type = CORESIGHT_DEV_TYPE_SOURCE;
1830         desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
1831         desc->ops = &etm_cs_ops;
1832         desc->pdata = pdata;
1833         desc->dev = dev;
1834         desc->groups = coresight_etm_groups;
1835         drvdata->csdev = coresight_register(desc);
1836         if (IS_ERR(drvdata->csdev)) {
1837                 ret = PTR_ERR(drvdata->csdev);
1838                 goto err_arch_supported;
1839         }
1840
1841         pm_runtime_put(&adev->dev);
1842         dev_info(dev, "%s initialized\n", (char *)id->data);
1843
1844         if (boot_enable) {
1845                 coresight_enable(drvdata->csdev);
1846                 drvdata->boot_enable = true;
1847         }
1848
1849         return 0;
1850
1851 err_arch_supported:
1852         if (--etm_count == 0)
1853                 unregister_hotcpu_notifier(&etm_cpu_notifier);
1854         return ret;
1855 }
1856
1857 static int etm_remove(struct amba_device *adev)
1858 {
1859         struct etm_drvdata *drvdata = amba_get_drvdata(adev);
1860
1861         coresight_unregister(drvdata->csdev);
1862         if (--etm_count == 0)
1863                 unregister_hotcpu_notifier(&etm_cpu_notifier);
1864
1865         return 0;
1866 }
1867
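/*
 * Runtime PM simply gates the optional trace bus clock ("atclk") when the
 * device is idle; the callbacks are no-ops if the clock was not provided.
 */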
1868 #ifdef CONFIG_PM
1869 static int etm_runtime_suspend(struct device *dev)
1870 {
1871         struct etm_drvdata *drvdata = dev_get_drvdata(dev);
1872
1873         if (drvdata && !IS_ERR(drvdata->atclk))
1874                 clk_disable_unprepare(drvdata->atclk);
1875
1876         return 0;
1877 }
1878
1879 static int etm_runtime_resume(struct device *dev)
1880 {
1881         struct etm_drvdata *drvdata = dev_get_drvdata(dev);
1882
1883         if (drvdata && !IS_ERR(drvdata->atclk))
1884                 clk_prepare_enable(drvdata->atclk);
1885
1886         return 0;
1887 }
1888 #endif
1889
1890 static const struct dev_pm_ops etm_dev_pm_ops = {
1891         SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
1892 };
1893
1894 static struct amba_id etm_ids[] = {
1895         {       /* ETM 3.3 */
1896                 .id     = 0x0003b921,
1897                 .mask   = 0x0003ffff,
1898                 .data   = "ETM 3.3",
1899         },
1900         {       /* ETM 3.5 */
1901                 .id     = 0x0003b956,
1902                 .mask   = 0x0003ffff,
1903                 .data   = "ETM 3.5",
1904         },
1905         {       /* PTM 1.0 */
1906                 .id     = 0x0003b950,
1907                 .mask   = 0x0003ffff,
1908                 .data   = "PTM 1.0",
1909         },
1910         {       /* PTM 1.1 */
1911                 .id     = 0x0003b95f,
1912                 .mask   = 0x0003ffff,
1913                 .data   = "PTM 1.1",
1914         },
1915         { 0, 0},
1916 };
1917
1918 static struct amba_driver etm_driver = {
1919         .drv = {
1920                 .name   = "coresight-etm3x",
1921                 .owner  = THIS_MODULE,
1922                 .pm     = &etm_dev_pm_ops,
1923         },
1924         .probe          = etm_probe,
1925         .remove         = etm_remove,
1926         .id_table       = etm_ids,
1927 };
1928
1929 module_amba_driver(etm_driver);
1930
1931 MODULE_LICENSE("GPL v2");
1932 MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");