/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-etm-perf.h"
#include "coresight-priv.h"

static struct pmu etm_pmu;
static bool etm_perf_up;

/**
 * struct etm_event_data - Coresight specifics associated to an event
 * @work:	Handle to free allocated memory outside IRQ context.
 * @mask:	Hold the CPU(s) this event was set for.
 * @snk_config:	The sink configuration.
 * @path:	An array of paths, each slot for one CPU.
 */
struct etm_event_data {
	struct work_struct work;
	cpumask_t mask;
	void *snk_config;
	struct list_head **path;
};

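/*
 * Per-CPU state: the perf AUX output handle in use between start() and
 * stop(), and the tracer (source) device feeding each CPU, registered
 * via etm_perf_symlink().
 */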
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_timestamp.attr,
	NULL,
};

static struct attribute_group etm_pmu_format_group = {
	.name = "format",
	.attrs = etm_config_formats_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	NULL,
};

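/*
 * Trace data is returned to userspace through the AUX ring buffer, so
 * there is nothing for the read() callback to report.
 */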
static void etm_event_read(struct perf_event *event) {}

static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	/* Inherited events get a copy of the parent's filter configuration */
	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}

static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;
	struct coresight_device *sink;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/*
	 * First deal with the sink configuration.  See comment in
	 * etm_setup_aux() about why we take the first available path.
	 */
	if (event_data->snk_config) {
		cpu = cpumask_first(mask);
		sink = coresight_get_sink(event_data->path[cpu]);
		if (sink_ops(sink)->free_buffer)
			sink_ops(sink)->free_buffer(event_data->snk_config);
	}

	for_each_cpu(cpu, mask) {
		if (!(IS_ERR_OR_NULL(event_data->path[cpu])))
			coresight_release_path(event_data->path[cpu]);
	}

	kfree(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	int size;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	/* Make sure nothing disappears under us */
	get_online_cpus();
	size = num_online_cpus();

	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_online_mask);
	put_online_cpus();

	/*
	 * Each CPU has a single path between source and destination.  As such
	 * allocate an array using CPU numbers as indexes.  That way a path
	 * for any CPU can easily be accessed at any given time.  We proceed
	 * the same way for sessions involving a single CPU.  The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = kcalloc(size,
				   sizeof(struct list_head *), GFP_KERNEL);
	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}

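/*
 * free_aux() can be called from IRQ context, so hand the teardown off
 * to a workqueue and free the memory in process context instead (see
 * free_event_data() above).
 */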
static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}

static void *etm_setup_aux(int event_cpu, void **pages,
			   int nr_pages, bool overwrite)
{
	int cpu;
	cpumask_t *mask;
	struct coresight_device *sink;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(event_cpu);
	if (!event_data)
		return NULL;

	/*
	 * In theory nothing prevents tracers in a trace session from being
	 * associated with different sinks, nor having a sink per tracer.  But
	 * until we have HW with this kind of topology we need to assume tracers
	 * in a trace session are using the same sink.  Therefore go through
	 * the coresight bus and pick the first enabled sink.
	 *
	 * When operated from sysFS users are responsible for enabling the sink
	 * while from perf, the perf tools will do it based on the choice made
	 * on the cmd line.  As such the "enable_sink" flag in sysFS is reset.
	 */
	sink = coresight_get_enabled_sink(true);
	if (!sink)
		goto err;

	INIT_WORK(&event_data->work, free_event_data);

	mask = &event_data->mask;

	/* Setup the path for each CPU in a trace session */
	for_each_cpu(cpu, mask) {
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		if (!csdev)
			goto err;

		/*
		 * Building a path doesn't enable it, it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		event_data->path[cpu] = coresight_build_path(csdev, sink);
		if (IS_ERR(event_data->path[cpu]))
			goto err;
	}

	if (!sink_ops(sink)->alloc_buffer)
		goto err;

	cpu = cpumask_first(mask);
	/* Get the AUX specific data from the sink buffer */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, cpu, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}

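/*
 * etm_event_start - runs on the CPU being traced.  Grab an AUX buffer
 * handle from the perf ring buffer, point the sink at it, enable the
 * path and finally turn the tracer itself on.  Any failure marks the
 * event stopped and, once a handle is held, flags the record truncated.
 */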
static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);

	if (!csdev)
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(event_data->path[cpu]);
	if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
		goto fail_end_stop;

	/* Configure the sink */
	if (sink_ops(sink)->set_buffer(sink, handle,
				       event_data->snk_config))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
		goto fail_end_stop;

	/* Tell the perf core the event is alive */
	event->hw.state = 0;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_end_stop;

out:
	return;

fail_end_stop:
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
fail:
	event->hw.state = PERF_HES_STOPPED;
	goto out;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct etm_event_data *event_data = perf_get_aux(handle);

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	if (!csdev)
		return;

	sink = coresight_get_sink(event_data->path[cpu]);
	if (!sink)
		return;

	/* stop tracer */
	source_ops(csdev)->disable(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		sink_ops(sink)->update_buffer(sink, handle,
					      event_data->snk_config);

		if (!sink_ops(sink)->reset_buffer)
			return;

		size = sink_ops(sink)->reset_buffer(sink, handle,
						    event_data->snk_config);

		perf_aux_output_end(handle, size);
	}

	/* Disabling the path makes its elements available to other sessions */
	coresight_disable_path(event_data->path[cpu]);
}

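/*
 * The perf core's scheduling hooks: add() starts the event right away
 * when PERF_EF_START is set, and del() always stops it with a final
 * buffer update.
 */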
static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}

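/*
 * Reject a filter set that needs more than ETM_ADDR_CMP_MAX address
 * comparators, mixes range filters with start/stop filters, or
 * specifies an empty address range.
 */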
static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/*
		 * As taken from the struct perf_addr_filter documentation:
		 *	@range:	1: range, 0: address
		 *
		 * At this time we don't allow range and start/stop filtering
		 * to cohabitate, they have to be mutually exclusive.
		 */
		if ((filter->range == 1) && address)
			return -EOPNOTSUPP;

		if ((filter->range == 0) && range)
			return -EOPNOTSUPP;

		/*
		 * For range filtering, the second address in the address
		 * range comparator needs to be higher than the first.
		 * Invalid otherwise.
		 */
		if (filter->range && filter->size == 0)
			return -EINVAL;

		/*
		 * Everything checks out with this filter, record what we've
		 * received before moving on to the next one.
		 */
		if (filter->range)
			range = true;
		else
			address = true;
	}

	return 0;
}

static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop, *offs = event->addr_filters_offs;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = filter->offset + offs[i];
		stop = start + filter->size;
		etm_filter = &filters->etm_filter[i];

		if (filter->range == 1) {
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
		} else {
			if (filter->filter == 1) {
				etm_filter->start_addr = start;
				etm_filter->type = ETM_ADDR_TYPE_START;
			} else {
				etm_filter->stop_addr = stop;
				etm_filter->type = ETM_ADDR_TYPE_STOP;
			}
		}
		i++;
	}

	filters->nr_filters = i;
}

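/*
 * etm_perf_symlink - on link, publish a "cpuN" symlink from the PMU's
 * sysfs directory to the tracer's device and record the tracer as the
 * source for that CPU; on unlink, undo both.
 */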
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}

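/*
 * Once registered, the PMU shows up as CORESIGHT_ETM_PMU_NAME ("cs_etm")
 * under /sys/bus/event_source/devices/.  An illustrative perf invocation
 * (the sink name is platform specific) would be:
 *
 *	perf record -e cs_etm/@20070000.etr/ --per-thread -- <command>
 */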
static int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE;

	etm_pmu.attr_groups = etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr = perf_sw_context;
	etm_pmu.read = etm_event_read;
	etm_pmu.event_init = etm_event_init;
	etm_pmu.setup_aux = etm_setup_aux;
	etm_pmu.free_aux = etm_free_aux;
	etm_pmu.start = etm_event_start;
	etm_pmu.stop = etm_event_stop;
	etm_pmu.add = etm_event_add;
	etm_pmu.del = etm_event_del;
	etm_pmu.addr_filters_sync = etm_addr_filters_sync;
	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}
device_initcall(etm_perf_init);