karo-tx-linux.git: kernel/trace/trace_irqsoff.c
tracing: Move "display-graph" option to main options
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;
static bool function_enabled;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

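/* Has the user selected function-graph output via the "display-graph" trace option? */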
#define is_graph() (trace_flags & TRACE_ITER_DISPLAY_GRAPH)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
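/*
 * Called when the "display-graph" option is flipped: restart the tracer
 * in the requested output mode, clearing the per-cpu tracing state and
 * the recorded max latency.
 */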
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph() ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tr->max_latency)
                        return 0;
        }
        return 1;
}

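/*
 * Called when a critical section ends: compute the time since
 * preempt_timestamp and, if it qualifies per report_latency() (beats
 * the current max, or crosses tracing_thresh when one is set), record
 * the snapshot via update_max_tr_single().
 */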
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

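/*
 * Begin timing an irqs/preempt disabled critical section on this CPU:
 * record the sequence count, the start timestamp and the start address.
 */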
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

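/*
 * End the critical section on this CPU and let check_critical_timing()
 * decide whether the measured interval is a new maximum.
 */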
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude periods of stoppage (in idle) from the measurement */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /*  CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

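/*
 * Hook either the function-graph callbacks or the plain function tracer
 * ops, depending on whether graph output is requested. Only done when
 * the "function" trace option is set (or about to be set).
 */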
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, int set)
{
        if (set)
                register_irqsoff_function(tr, is_graph(), 1);
        else
                unregister_irqsoff_function(tr, is_graph());
        return 0;
}

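/*
 * flag_changed callback: react to the "function" and "display-graph"
 * trace options; everything else falls through to trace_keep_overwrite().
 */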
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (mask & TRACE_ITER_FUNCTION)
                return irqsoff_function_set(tr, set);

        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

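/*
 * Common init for the irqsoff, preemptoff and preemptirqsoff tracers:
 * only one of them may run at a time (irqsoff_busy), force overwrite
 * and latency format, reset the buffers and the max latency, set up
 * the function ops and start tracing.
 */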
static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(&tr->trace_buffer);

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph())))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph());

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);