]> git.karo-electronics.de Git - karo-tx-linux.git/blob - tools/perf/builtin-ftrace.c
982b98ee639edd3f0b199782d89f097cb253b8bc
[karo-tx-linux.git] / tools / perf / builtin-ftrace.c
1 /*
2  * builtin-ftrace.c
3  *
4  * Copyright (c) 2013  LG Electronics,  Namhyung Kim <namhyung@kernel.org>
5  *
6  * Released under the GPL v2.
7  */
8
9 #include "builtin.h"
10 #include "perf.h"
11
12 #include <errno.h>
13 #include <unistd.h>
14 #include <signal.h>
15 #include <fcntl.h>
16 #include <poll.h>
17
18 #include "debug.h"
19 #include <subcmd/parse-options.h>
20 #include <api/fs/tracing_path.h>
21 #include "evlist.h"
22 #include "target.h"
23 #include "cpumap.h"
24 #include "thread_map.h"
25 #include "util/config.h"
26
27
28 #define DEFAULT_TRACER  "function_graph"
29
/*
 * Everything needed for a single 'perf ftrace' session: the evlist
 * (which carries the thread/cpu maps for the target), the target
 * specification, and the ftrace tracer name to install.
 */
struct perf_ftrace {
        struct perf_evlist *evlist;     /* supplies threads/cpus maps */
        struct target target;           /* pid / cpu-list / system-wide spec */
        const char *tracer;             /* "function_graph" (default) or "function" */
};
35
36 static bool done;
37
/* Generic termination handler: flag the main loop to wind down. */
static void sig_handler(int sig __maybe_unused)
{
        done = true;
}
42
43 /*
44  * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
45  * we asked by setting its exec_error to the function below,
46  * ftrace__workload_exec_failed_signal.
47  *
48  * XXX We need to handle this more appropriately, emitting an error, etc.
49  */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
                                                siginfo_t *info __maybe_unused,
                                                void *ucontext __maybe_unused)
{
        /*
         * The child's errno travels in info->si_value but is not consumed
         * here yet (see the XXX above); for now just stop the main loop.
         */
        /* workload_exec_errno = info->si_value.sival_int; */
        done = true;
}
57
/*
 * Write @val to the tracefs file @name, either truncating the file
 * first or appending, depending on @append.
 *
 * Returns 0 when the whole value was written, -1 on any failure
 * (failures are only logged at debug verbosity).
 */
static int __write_tracing_file(const char *name, const char *val, bool append)
{
        int fd;
        int ret = -1;
        char errbuf[512];
        const ssize_t len = strlen(val);
        char *file = get_tracing_file(name);

        if (file == NULL) {
                pr_debug("cannot get tracing file: %s\n", name);
                return -1;
        }

        fd = open(file, O_WRONLY | (append ? O_APPEND : O_TRUNC));
        if (fd < 0) {
                pr_debug("cannot open tracing file: %s: %s\n",
                         name, str_error_r(errno, errbuf, sizeof(errbuf)));
                goto out;
        }

        if (write(fd, val, len) != len)
                pr_debug("write '%s' to tracing/%s failed: %s\n",
                         val, name, str_error_r(errno, errbuf, sizeof(errbuf)));
        else
                ret = 0;

        close(fd);
out:
        put_tracing_file(file);
        return ret;
}
95
/* Overwrite (O_TRUNC) the tracefs file @name with @val. */
static int write_tracing_file(const char *name, const char *val)
{
        return __write_tracing_file(name, val, false);
}
100
/* Append (O_APPEND) @val to the tracefs file @name. */
static int append_tracing_file(const char *name, const char *val)
{
        return __write_tracing_file(name, val, true);
}
105
106 static int reset_tracing_cpu(void);
107
108 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
109 {
110         if (write_tracing_file("tracing_on", "0") < 0)
111                 return -1;
112
113         if (write_tracing_file("current_tracer", "nop") < 0)
114                 return -1;
115
116         if (write_tracing_file("set_ftrace_pid", " ") < 0)
117                 return -1;
118
119         if (reset_tracing_cpu() < 0)
120                 return -1;
121
122         return 0;
123 }
124
125 static int set_tracing_pid(struct perf_ftrace *ftrace)
126 {
127         int i;
128         char buf[16];
129
130         if (target__has_cpu(&ftrace->target))
131                 return 0;
132
133         for (i = 0; i < thread_map__nr(ftrace->evlist->threads); i++) {
134                 scnprintf(buf, sizeof(buf), "%d",
135                           ftrace->evlist->threads->map[i]);
136                 if (append_tracing_file("set_ftrace_pid", buf) < 0)
137                         return -1;
138         }
139         return 0;
140 }
141
142 static int set_tracing_cpumask(struct cpu_map *cpumap)
143 {
144         char *cpumask;
145         size_t mask_size;
146         int ret;
147         int last_cpu;
148
149         last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
150         mask_size = (last_cpu + 3) / 4 + 1;
151         mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
152
153         cpumask = malloc(mask_size);
154         if (cpumask == NULL) {
155                 pr_debug("failed to allocate cpu mask\n");
156                 return -1;
157         }
158
159         cpu_map__snprint_mask(cpumap, cpumask, mask_size);
160
161         ret = write_tracing_file("tracing_cpumask", cpumask);
162
163         free(cpumask);
164         return ret;
165 }
166
167 static int set_tracing_cpu(struct perf_ftrace *ftrace)
168 {
169         struct cpu_map *cpumap = ftrace->evlist->cpus;
170
171         if (!target__has_cpu(&ftrace->target))
172                 return 0;
173
174         return set_tracing_cpumask(cpumap);
175 }
176
/*
 * Reset tracing_cpumask to cover all online cpus.
 *
 * cpu_map__new(NULL) builds the online-cpu map; it can return NULL on
 * allocation failure, and without the check below that NULL would be
 * dereferenced inside set_tracing_cpumask() (cpumap->nr).
 */
static int reset_tracing_cpu(void)
{
        struct cpu_map *cpumap = cpu_map__new(NULL);
        int ret;

        if (cpumap == NULL) {
                pr_debug("failed to get online cpu map\n");
                return -1;
        }

        ret = set_tracing_cpumask(cpumap);
        cpu_map__put(cpumap);
        return ret;
}
186
/*
 * Run one ftrace session: set up the tracer, filters and (optional)
 * workload, stream trace_pipe to stdout until a signal or workload
 * exit sets 'done', then drain the buffer and reset all files.
 * Returns 0 on a clean (signal-terminated) run, -1 on any error.
 */
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
        char *trace_file;
        int trace_fd;
        char buf[4096];
        struct pollfd pollfd = {
                .events = POLLIN,
        };

        /* tracefs knobs are only writable by root */
        if (geteuid() != 0) {
                pr_err("ftrace only works for root!\n");
                return -1;
        }

        /*
         * All four signals funnel into sig_handler/done: user interrupt,
         * workload fork failure (SIGUSR1, see prepare_workload below),
         * workload exit (SIGCHLD), and a closed pager pipe (SIGPIPE).
         */
        signal(SIGINT, sig_handler);
        signal(SIGUSR1, sig_handler);
        signal(SIGCHLD, sig_handler);
        signal(SIGPIPE, sig_handler);

        /* start from a known-clean tracer configuration */
        if (reset_tracing_files(ftrace) < 0)
                goto out;

        /* reset ftrace buffer */
        if (write_tracing_file("trace", "0") < 0)
                goto out;

        /* fork the workload now (stopped) so its pid can be filtered on */
        if (argc && perf_evlist__prepare_workload(ftrace->evlist,
                                &ftrace->target, argv, false,
                                ftrace__workload_exec_failed_signal) < 0) {
                goto out;
        }

        if (set_tracing_pid(ftrace) < 0) {
                pr_err("failed to set ftrace pid\n");
                goto out_reset;
        }

        if (set_tracing_cpu(ftrace) < 0) {
                pr_err("failed to set tracing cpumask\n");
                goto out_reset;
        }

        if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
                pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
                goto out_reset;
        }

        setup_pager();

        /* trace_pipe is consuming: reads drain the ring buffer */
        trace_file = get_tracing_file("trace_pipe");
        if (!trace_file) {
                pr_err("failed to open trace_pipe\n");
                goto out_reset;
        }

        trace_fd = open(trace_file, O_RDONLY);

        put_tracing_file(trace_file);

        if (trace_fd < 0) {
                pr_err("failed to open trace_pipe\n");
                goto out_reset;
        }

        /* non-blocking reads so the loop can notice 'done' promptly */
        fcntl(trace_fd, F_SETFL, O_NONBLOCK);
        pollfd.fd = trace_fd;

        /* enable tracing only after everything else is configured */
        if (write_tracing_file("tracing_on", "1") < 0) {
                pr_err("can't enable tracing\n");
                goto out_close_fd;
        }

        perf_evlist__start_workload(ftrace->evlist);

        /* main loop: copy trace_pipe to stdout until signalled */
        while (!done) {
                if (poll(&pollfd, 1, -1) < 0)
                        break;

                if (pollfd.revents & POLLIN) {
                        int n = read(trace_fd, buf, sizeof(buf));
                        if (n < 0)
                                break;
                        if (fwrite(buf, n, 1, stdout) != 1)
                                break;
                }
        }

        /* stop producing before draining what is left in the buffer */
        write_tracing_file("tracing_on", "0");

        /* read remaining buffer contents */
        while (true) {
                int n = read(trace_fd, buf, sizeof(buf));
                if (n <= 0)
                        break;
                if (fwrite(buf, n, 1, stdout) != 1)
                        break;
        }

out_close_fd:
        close(trace_fd);
out_reset:
        reset_tracing_files(ftrace);
out:
        /* 'done' distinguishes a signalled shutdown from an error break */
        return done ? 0 : -1;
}
292
293 static int perf_ftrace_config(const char *var, const char *value, void *cb)
294 {
295         struct perf_ftrace *ftrace = cb;
296
297         if (prefixcmp(var, "ftrace."))
298                 return 0;
299
300         if (strcmp(var, "ftrace.tracer"))
301                 return -1;
302
303         if (!strcmp(value, "function_graph") ||
304             !strcmp(value, "function")) {
305                 ftrace->tracer = value;
306                 return 0;
307         }
308
309         pr_err("Please select \"function_graph\" (default) or \"function\"\n");
310         return -1;
311 }
312
/*
 * Entry point for 'perf ftrace': parse config and options, validate
 * the target, build the thread/cpu maps, then hand off to
 * __cmd_ftrace().  Returns 0 on success or a negative error.
 */
int cmd_ftrace(int argc, const char **argv)
{
        int ret;
        struct perf_ftrace ftrace = {
                .tracer = DEFAULT_TRACER,
                /* UINT_MAX means "no uid filter" for the target */
                .target = { .uid = UINT_MAX, },
        };
        const char * const ftrace_usage[] = {
                "perf ftrace [<options>] [<command>]",
                "perf ftrace [<options>] -- <command> [<options>]",
                NULL
        };
        const struct option ftrace_options[] = {
        OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
                   "tracer to use: function_graph(default) or function"),
        OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
                   "trace on existing process id"),
        OPT_INCR('v', "verbose", &verbose,
                 "be more verbose"),
        OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
                    "list of cpus to monitor"),
        OPT_END()
        };

        /* config first so command-line options can override it */
        ret = perf_config(perf_ftrace_config, &ftrace);
        if (ret < 0)
                return -1;

        argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
                            PARSE_OPT_STOP_AT_NON_OPTION);
        /* need either a workload command or an explicit target */
        if (!argc && target__none(&ftrace.target))
                usage_with_options(ftrace_usage, ftrace_options);

        ret = target__validate(&ftrace.target);
        if (ret) {
                char errbuf[512];

                target__strerror(&ftrace.target, ret, errbuf, 512);
                pr_err("%s\n", errbuf);
                return -EINVAL;
        }

        ftrace.evlist = perf_evlist__new();
        if (ftrace.evlist == NULL)
                return -ENOMEM;

        /* build thread/cpu maps matching the requested target */
        ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
        if (ret < 0)
                goto out_delete_evlist;

        ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
        perf_evlist__delete(ftrace.evlist);

        return ret;
}