#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BOOT,

	__TRACE_LAST_TYPE
};
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};
/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};
extern struct tracer boot_tracer;
/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};
/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};
/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};
#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};
struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot {
	struct trace_entry	ent;
	struct boot_trace	initcall;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	- interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED - reschedule is requested
 *  HARDIRQ	- inside an interrupt handler
 *  SOFTIRQ	- inside a softirq handler
 *  CONT	- multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};

#define TRACE_BUF_SIZE		1024
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace):
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);
/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);	\
		__ftrace_bad_type();					\
	} while (0)
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};
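
/*
 * A minimal sketch of a print_line callback (example only; the
 * function name is hypothetical and this block is not compiled):
 * check the entry type, use trace_assign_type() to obtain a checked,
 * correctly typed pointer, and map trace_seq overflow to
 * TRACE_TYPE_PARTIAL_LINE so the core retries after flushing.
 */
#if 0	/* illustrative example */
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct ftrace_entry *field;

	if (entry->type != TRACE_FN)
		return TRACE_TYPE_UNHANDLED;	/* let other printers try */

	trace_assign_type(field, entry);	/* WARNs on a type mismatch */

	if (!trace_seq_printf(&iter->seq, "%lx <- %lx\n",
			      field->ip, field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;	/* seq buffer was full */

	return TRACE_TYPE_HANDLED;
}
#endif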
/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	void			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	void			(*ctrl_update)(struct trace_array *tr);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	struct tracer		*next;
	int			print_max;
};
struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};
/*
 * Trace iterator - used by printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;
};
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
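
/*
 * A minimal sketch of how writers use tracing_generic_entry_update()
 * (example only, not compiled; assumes the ring_buffer reserve/commit
 * API of this tree): reserve an event, fill in the common trace_entry
 * fields, set the type by hand, then commit.
 */
#if 0	/* illustrative example */
static void example_record_fn(struct trace_array *tr, unsigned long ip,
			      unsigned long parent_ip, unsigned long flags,
			      int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;		/* buffer full or tracing disabled */
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type	 = TRACE_FN;
	entry->ip	 = ip;
	entry->parent_ip = parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}
#endif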
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
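
/*
 * A minimal sketch of registering a tracer (example only, not
 * compiled; the "example" names are hypothetical): fill in a
 * struct tracer and hand it to register_tracer() from an initcall.
 */
#if 0	/* illustrative example */
static void example_tracer_init(struct trace_array *tr)
{
	int cpu;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);
#endif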

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
};
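
/*
 * A minimal sketch of how output code consults these bits (example
 * only, not compiled): each bit is tested against the global
 * trace_flags mask.
 */
#if 0	/* illustrative example */
static void example_print_ip(struct trace_seq *s, struct ftrace_entry *field)
{
	trace_seq_printf(s, "%lx", field->ip);

	/* "print-parent" option: also show the calling function */
	if (trace_flags & TRACE_ITER_PRINT_PARENT)
		trace_seq_printf(s, " <- %lx", field->parent_ip);
}
#endif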

extern struct tracer nop_trace;
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}
/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The paired disable call saved the
 * need_resched state. If resched is set, then we were either inside
 * an atomic section or inside the scheduler (we would have already
 * scheduled otherwise). In this case, we do not want to call the
 * normal preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
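
/*
 * A minimal usage sketch (example only, not compiled): a tracing
 * hook that may fire inside the scheduler brackets its work with
 * this pair instead of plain preempt_disable()/preempt_enable().
 */
#if 0	/* illustrative example */
static void example_trace_hook(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	resched = ftrace_preempt_disable();

	/* ... record the event here, e.g. via trace_function() ... */

	ftrace_preempt_enable(resched);
}
#endif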

#endif /* _LINUX_KERNEL_TRACE_H */