kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
};
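
/*
 * With the function tracer active, this option is normally toggled
 * from user space through the tracing debugfs directory, e.g.
 * (paths may vary by mount point):
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 */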

static int allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

        tr->ops = ops;
        ops->private = tr;
        return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;

        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ret = allocate_ftrace_ops(tr);
        if (ret)
                return ret;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        kfree(tr->ops);
        tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
                /* There's only one global tr */
                if (!trace_ops.private) {
                        trace_ops.private = tr;
                        trace_stack_ops.private = tr;
                }

                if (func_flags.val & TRACE_FUNC_OPT_STACK)
                        ops = &trace_stack_ops;
                else
                        ops = &trace_ops;
                tr->ops = ops;
        } else if (!tr->ops) {
                /*
                 * Instance trace_arrays get their ops allocated
                 * at instance creation, unless that allocation
                 * failed.
                 */
                return -ENOMEM;
        }

        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}

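/*
 * Main callback hooked to ftrace for this trace_array. Recursion is
 * caught with trace_test_and_set_recursion(), and the per-cpu
 * "disabled" flag is checked before an event is written to the
 * ring buffer.
 */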
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}

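/*
 * Variant used when the func_stack_trace option is set. Unlike
 * function_trace_call() above it disables interrupts and uses the
 * per-cpu "disabled" counter for protection, since the stack unwind
 * below may itself pass through traceable functions.
 */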
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

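/*
 * Toggle handler for the tracer's option bits. Flipping
 * TRACE_FUNC_OPT_STACK swaps the registered ftrace_ops between the
 * plain and stack-tracing callbacks; the unregister/register pair is
 * what makes the switch take effect.
 */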
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                unregister_ftrace_function(tr->ops);

                if (set) {
                        tr->ops = &trace_stack_ops;
                        register_ftrace_function(tr->ops);
                } else {
                        tr->ops = &trace_ops;
                        register_ftrace_function(tr->ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct tracer function_trace __tracer_data =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .wait_pipe      = poll_wait_pipe,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
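/*
 * Shared helper for the counted probes below: a count of -1 means
 * "unlimited", otherwise the count is decremented on each hit.
 * Returns nonzero while the probe still has budget to fire.
 */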
static int update_count(void **data)
{
        unsigned long *count = (unsigned long *)data;

        if (!*count)
                return 0;

        if (*count != -1)
                (*count)--;

        return 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        if (update_count(data))
                tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
        trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                trace_dump_stack(STACK_SKIP);
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func                   = ftrace_traceon_count,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func                   = ftrace_traceoff_count,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func                   = ftrace_stacktrace_count,
        .print                  = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func                   = ftrace_dump_probe,
        .print                  = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func                   = ftrace_cpudump_probe,
        .print                  = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func                   = ftrace_stacktrace,
        .print                  = ftrace_stacktrace_print,
};

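/*
 * Common parsing for the probe commands accepted via set_ftrace_filter,
 * of the rough form "<glob>:<cmd>[:<count>]"; a leading '!' removes a
 * previously installed probe. For example (illustrative):
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 */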
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                return 0;
        }

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

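/*
 * The ftrace_func_command structures below hook the callbacks above
 * into set_ftrace_filter's "<glob>:<cmd>" syntax.
 */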
static struct ftrace_func_command ftrace_traceon_cmd = {
        .name                   = "traceon",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name                   = "traceoff",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name                   = "stacktrace",
        .func                   = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name                   = "dump",
        .func                   = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name                   = "cpudump",
        .func                   = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
core_initcall(init_function_trace);