kernel/trace/trace_irqsoff.c (ftrace: fix updates to max trace)
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

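/*
 * Per-cpu flag: set to 1 while this CPU is timing a critical section,
 * so only the outermost irqs-off/preempt-off region is measured.
 */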
static DEFINE_PER_CPU(int, tracing_cpu);

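/*
 * Serializes the compare-and-update of the max latency (and max_sequence)
 * so that two CPUs cannot record a new maximum at the same time.
 */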
static DEFINE_SPINLOCK(max_trace_lock);

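/* Which events this tracer times: irqs off, preemption off, or both */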
enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int notrace
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int notrace
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void notrace
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return;

        local_save_flags(flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(flags))
                return;

        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

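        /* only record if we are not nested inside another trace on this CPU */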
        if (likely(disabled == 1))
                ftrace(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 * With a threshold set, report anything over the threshold;
 * otherwise only report a new maximum.
 */
static int notrace report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

static void notrace
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        unsigned long latency, t0, t1;
        cycle_t T0, T1, delta;
        unsigned long flags;

        /*
         * usecs conversion is slow so we try to delay the conversion
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
        T1 = now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        if (!report_latency(delta))
                goto out;

        spin_lock(&max_trace_lock);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);

        latency = nsecs_to_usecs(delta);

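        /*
         * If another CPU recorded a maximum since we started measuring,
         * our data may have been disturbed (see max_sequence above); skip it.
         */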
        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        tracing_max_latency = delta;
        t0 = nsecs_to_usecs(T0);
        t1 = nsecs_to_usecs(T1);

        data->critical_end = parent_ip;

        update_max_tr_single(tr, current, cpu);

        if (tracing_thresh)
                printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
                       "violates %lu us threshold.\n"
                       " => started at timestamp %lu: ",
                                current->comm, current->pid,
                                raw_smp_processor_id(),
                                latency, nsecs_to_usecs(tracing_thresh), t0);
        else
                printk(KERN_INFO "(%16s-%-5d|#%d):"
                       " new %lu us maximum-latency "
                       "critical section.\n => started at timestamp %lu: ",
                                current->comm, current->pid,
                                raw_smp_processor_id(),
                                latency, t0);

        print_symbol(KERN_CONT "<%s>\n", data->critical_start);
        printk(KERN_CONT " =>   ended at timestamp %lu: ", t1);
        print_symbol(KERN_CONT "<%s>\n", data->critical_end);
        dump_stack();
        t1 = nsecs_to_usecs(now(cpu));
        printk(KERN_CONT " =>   dump-end timestamp %lu\n\n", t1);

        max_sequence++;

out_unlock:
        spin_unlock(&max_trace_lock);

out:
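        /* re-arm the measurement for the next critical section on this CPU */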
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = now(cpu);
        tracing_reset(data);
        ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
}

static inline void notrace
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

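        /* nested irqs-off/preempt-off sections: only time the outermost one */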
        if (__get_cpu_var(tracing_cpu))
                return;

        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (unlikely(!data) || unlikely(!data->trace) ||
            atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = now(cpu);
        data->critical_start = parent_ip ? : ip;
        tracing_reset(data);

        local_save_flags(flags);

        ftrace(tr, data, ip, parent_ip, flags);

        __get_cpu_var(tracing_cpu) = 1;

        atomic_dec(&data->disabled);
}

static inline void notrace
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(__get_cpu_var(tracing_cpu)))
                __get_cpu_var(tracing_cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (unlikely(!data) || unlikely(!data->trace) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);
        local_save_flags(flags);
        ftrace(tr, data, ip, parent_ip, flags);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suspend timing while in idle */
void notrace start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

void notrace stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
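/*
 * With lockdep (PROVE_LOCKING) enabled, lockdep already hooks the hardirq
 * on/off events and calls these helpers with the caller addresses.
 */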
void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs for the remaining irq-flags tracing hooks; only the hardirq
 * on/off events below are of interest to this tracer:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void notrace trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void notrace trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /*  CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
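/*
 * Hooks called when preemption is disabled (preempt_off) and enabled again
 * (preempt_on); note that "on" stops the timing and "off" starts it.
 */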
void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
{
        stop_critical_timing(a0, a1);
}

void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
{
        start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

static void start_irqsoff_tracer(struct trace_array *tr)
{
        register_ftrace_function(&trace_ops);
        tracer_enabled = 1;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        if (tr->ctrl)
                start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
        if (tr->ctrl)
                start_irqsoff_tracer(tr);
        else
                stop_irqsoff_tracer(tr);
}

static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
{
        /* stop the trace while dumping */
        if (iter->tr->ctrl)
                stop_irqsoff_tracer(iter->tr);
}

static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
{
        if (iter->tr->ctrl)
                start_irqsoff_tracer(iter->tr);
}

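/*
 * The tracer flavours below share the same machinery and differ only in
 * which events (trace_type) they time.
 */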
#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
        .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
        .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
        .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);