/*
 * tools/perf/util/record.c — feature probing and option configuration
 * helpers for perf record sessions.
 */
#include "evlist.h"
#include "evsel.h"
#include "cpumap.h"
#include "parse-events.h"
#include <errno.h>
#include <api/fs/fs.h>
#include "util.h"
#include "cloexec.h"

/* Callback that tweaks one evsel's attr while probing whether the running
 * kernel supports a particular perf_event_attr feature. */
typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
11
12 static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
13 {
14         struct perf_evlist *evlist;
15         struct perf_evsel *evsel;
16         unsigned long flags = perf_event_open_cloexec_flag();
17         int err = -EAGAIN, fd;
18         static pid_t pid = -1;
19
20         evlist = perf_evlist__new();
21         if (!evlist)
22                 return -ENOMEM;
23
24         if (parse_events(evlist, str, NULL))
25                 goto out_delete;
26
27         evsel = perf_evlist__first(evlist);
28
29         while (1) {
30                 fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
31                 if (fd < 0) {
32                         if (pid == -1 && errno == EACCES) {
33                                 pid = 0;
34                                 continue;
35                         }
36                         goto out_delete;
37                 }
38                 break;
39         }
40         close(fd);
41
42         fn(evsel);
43
44         fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
45         if (fd < 0) {
46                 if (errno == EINVAL)
47                         err = -EINVAL;
48                 goto out_delete;
49         }
50         close(fd);
51         err = 0;
52
53 out_delete:
54         perf_evlist__delete(evlist);
55         return err;
56 }
57
58 static bool perf_probe_api(setup_probe_fn_t fn)
59 {
60         const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
61         struct cpu_map *cpus;
62         int cpu, ret, i = 0;
63
64         cpus = cpu_map__new(NULL);
65         if (!cpus)
66                 return false;
67         cpu = cpus->map[0];
68         cpu_map__put(cpus);
69
70         do {
71                 ret = perf_do_probe_api(fn, cpu, try[i++]);
72                 if (!ret)
73                         return true;
74         } while (ret == -EAGAIN && try[i]);
75
76         return false;
77 }
78
/* Tweak: request PERF_SAMPLE_IDENTIFIER in the evsel's sample_type. */
static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}
83
/* Tweak: set the comm_exec attribute bit. */
static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}
88
/* Tweak: set the context_switch attribute bit. */
static void perf_probe_context_switch(struct perf_evsel *evsel)
{
	evsel->attr.context_switch = 1;
}
93
/* True when the running kernel accepts PERF_SAMPLE_IDENTIFIER. */
bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}
98
/* True when the running kernel accepts the comm_exec attribute bit. */
static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}
103
/* True when the running kernel accepts the context_switch attribute bit. */
bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}
108
109 bool perf_can_record_cpu_wide(void)
110 {
111         struct perf_event_attr attr = {
112                 .type = PERF_TYPE_SOFTWARE,
113                 .config = PERF_COUNT_SW_CPU_CLOCK,
114                 .exclude_kernel = 1,
115         };
116         struct cpu_map *cpus;
117         int cpu, fd;
118
119         cpus = cpu_map__new(NULL);
120         if (!cpus)
121                 return false;
122         cpu = cpus->map[0];
123         cpu_map__put(cpus);
124
125         fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
126         if (fd < 0)
127                 return false;
128         close(fd);
129
130         return true;
131 }
132
/*
 * Apply @opts and @callchain to every evsel in @evlist, then decide whether
 * the evsels need PERF_SAMPLE_IDENTIFIER so samples can be matched back to
 * the event they belong to.
 */
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	/* NOTE(review): map[0] < 0 looks like a per-thread (no real cpu)
	 * map, where inherit cannot be used — confirm against cpu_map. */
	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	/* Only set comm_exec below if the kernel actually supports it. */
	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__config(evsel, opts, callchain);
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	} else if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		/* Mixed sample_types need the identifier to tell samples
		 * apart — but only request it if the kernel supports it. */
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}
182
/* Read the kernel's perf_event_max_sample_rate sysctl into *rate.
 * Returns sysctl__read_int()'s result (0 on success, non-zero on failure). */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
187
188 static int record_opts__config_freq(struct record_opts *opts)
189 {
190         bool user_freq = opts->user_freq != UINT_MAX;
191         unsigned int max_rate;
192
193         if (opts->user_interval != ULLONG_MAX)
194                 opts->default_interval = opts->user_interval;
195         if (user_freq)
196                 opts->freq = opts->user_freq;
197
198         /*
199          * User specified count overrides default frequency.
200          */
201         if (opts->default_interval)
202                 opts->freq = 0;
203         else if (opts->freq) {
204                 opts->default_interval = opts->freq;
205         } else {
206                 pr_err("frequency and count are zero, aborting\n");
207                 return -1;
208         }
209
210         if (get_max_rate(&max_rate))
211                 return 0;
212
213         /*
214          * User specified frequency is over current maximum.
215          */
216         if (user_freq && (max_rate < opts->freq)) {
217                 pr_err("Maximum frequency rate (%u) reached.\n"
218                    "Please use -F freq option with lower value or consider\n"
219                    "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
220                    max_rate);
221                 return -1;
222         }
223
224         /*
225          * Default frequency is over current maximum.
226          */
227         if (max_rate < opts->freq) {
228                 pr_warning("Lowering default frequency rate to %u.\n"
229                            "Please consider tweaking "
230                            "/proc/sys/kernel/perf_event_max_sample_rate.\n",
231                            max_rate);
232                 opts->freq = max_rate;
233         }
234
235         return 0;
236 }
237
/* Finalize record options; currently only frequency/period resolution. */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
242
243 bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
244 {
245         struct perf_evlist *temp_evlist;
246         struct perf_evsel *evsel;
247         int err, fd, cpu;
248         bool ret = false;
249         pid_t pid = -1;
250
251         temp_evlist = perf_evlist__new();
252         if (!temp_evlist)
253                 return false;
254
255         err = parse_events(temp_evlist, str, NULL);
256         if (err)
257                 goto out_delete;
258
259         evsel = perf_evlist__last(temp_evlist);
260
261         if (!evlist || cpu_map__empty(evlist->cpus)) {
262                 struct cpu_map *cpus = cpu_map__new(NULL);
263
264                 cpu =  cpus ? cpus->map[0] : 0;
265                 cpu_map__put(cpus);
266         } else {
267                 cpu = evlist->cpus->map[0];
268         }
269
270         while (1) {
271                 fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
272                                          perf_event_open_cloexec_flag());
273                 if (fd < 0) {
274                         if (pid == -1 && errno == EACCES) {
275                                 pid = 0;
276                                 continue;
277                         }
278                         goto out_delete;
279                 }
280                 break;
281         }
282         close(fd);
283         ret = true;
284
285 out_delete:
286         perf_evlist__delete(temp_evlist);
287         return ret;
288 }