2 * Ftrace header. For implementation details beyond the random comments
3 * scattered below, see: Documentation/trace/ftrace-design.txt
6 #ifndef _LINUX_FTRACE_H
7 #define _LINUX_FTRACE_H
9 #include <linux/trace_clock.h>
10 #include <linux/kallsyms.h>
11 #include <linux/linkage.h>
12 #include <linux/bitops.h>
13 #include <linux/ptrace.h>
14 #include <linux/ktime.h>
15 #include <linux/sched.h>
16 #include <linux/types.h>
17 #include <linux/init.h>
20 #include <asm/ftrace.h>
23 * If the arch supports passing the variable contents of
24 * function_trace_op as the third parameter back from the
25 * mcount call, then the arch should define this as 1.
27 #ifndef ARCH_SUPPORTS_FTRACE_OPS
28 #define ARCH_SUPPORTS_FTRACE_OPS 0
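/*
 * For example, an arch whose mcount/fentry stub already passes the ops
 * pointer through to the C handler could advertise that from its
 * asm/ftrace.h (an illustrative sketch only; the value is arch specific):
 *
 *	#define ARCH_SUPPORTS_FTRACE_OPS 1
 */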
32 * If the arch's mcount caller does not support all of ftrace's
33 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
36 #if !ARCH_SUPPORTS_FTRACE_OPS
37 # define FTRACE_FORCE_LIST_FUNC 1
39 # define FTRACE_FORCE_LIST_FUNC 0
42 /* Main tracing buffer and events set up */
44 void trace_init(void);
45 void early_trace_init(void);
47 static inline void trace_init(void) { }
48 static inline void early_trace_init(void) { }
54 #ifdef CONFIG_FUNCTION_TRACER
56 extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);
64 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
65 struct ftrace_ops *op, struct pt_regs *regs);
67 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
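/*
 * A minimal sketch of a handler matching ftrace_func_t; the name
 * "my_callback" is illustrative and not part of this header. Note that
 * regs may be NULL unless the ops asked for (and the arch supports)
 * saved regs:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		trace_printk("%pS called from %pS\n",
 *			     (void *)ip, (void *)parent_ip);
 *	}
 */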
70 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
71 * set in the flags member.
 * PER_CPU, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are attribute flags: they can be set only before
 * registering the ftrace_ops and cannot be modified while registered.
 * Changing these attribute flags after registering the ftrace_ops will
 * cause unexpected results.
78 * ENABLED - set/unset when ftrace_ops is registered/unregistered
79 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
80 * allocated ftrace_ops which need special care
 * PER_CPU - set manually by the ftrace_ops user to denote that the
 *            ftrace_ops can be controlled by the following calls:
83 * ftrace_function_local_enable
84 * ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each traced function call
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is also set.
91 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
92 * handler can handle an arch that does not save regs
93 * (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
96 * passing regs to the handler.
97 * Note, if this flag is set, the SAVE_REGS flag will automatically
98 * get set upon registering the ftrace_ops, if the arch supports it.
99 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 * that the callback has its own recursion protection. If it does
101 * not set this, then the ftrace infrastructure will add recursion
102 * protection for the caller.
 * STUB - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first
 *            time register_ftrace_function() is called on it, the ops gets
 *            initialized)
106 * DELETED - The ops are being deleted, do not let them be registered again.
107 * ADDING - The ops is in the process of being added.
108 * REMOVING - The ops is in the process of being removed.
109 * MODIFYING - The ops is in the process of changing its filter functions.
110 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 * The arch specific code sets this flag when it allocates a
112 * trampoline. This lets the arch know that it can update the
113 * trampoline in case the callback function changes.
114 * The ftrace_ops trampoline can be set by the ftrace users, and
115 * in such cases the arch must not modify it. Only the arch ftrace
116 * core code should set this flag.
117 * IPMODIFY - The ops can modify the IP register. This can only be set with
118 * SAVE_REGS. If another ops with this flag set is already registered
119 * for any of the functions that this ops will be registered for, then
120 * this ops will fail to register or set_filter_ip.
121 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
124 FTRACE_OPS_FL_ENABLED = 1 << 0,
125 FTRACE_OPS_FL_DYNAMIC = 1 << 1,
126 FTRACE_OPS_FL_PER_CPU = 1 << 2,
127 FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
128 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
129 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
130 FTRACE_OPS_FL_STUB = 1 << 6,
131 FTRACE_OPS_FL_INITIALIZED = 1 << 7,
132 FTRACE_OPS_FL_DELETED = 1 << 8,
133 FTRACE_OPS_FL_ADDING = 1 << 9,
134 FTRACE_OPS_FL_REMOVING = 1 << 10,
135 FTRACE_OPS_FL_MODIFYING = 1 << 11,
136 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
137 FTRACE_OPS_FL_IPMODIFY = 1 << 13,
138 FTRACE_OPS_FL_PID = 1 << 14,
	FTRACE_OPS_FL_RCU			= 1 << 15,
};
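/*
 * Example flag usage (a sketch, not a definitive recipe): a handler that
 * copes with regs == NULL can request regs opportunistically, and
 * registration still succeeds on arches without
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS ("my_ops"/"my_callback" illustrative):
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 */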
142 #ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know which functions the callbacks trace */
144 struct ftrace_ops_hash {
145 struct ftrace_hash *notrace_hash;
146 struct ftrace_hash *filter_hash;
	struct mutex			regex_lock;
};
150 void ftrace_free_init_mem(void);
152 static inline void ftrace_free_init_mem(void) { }
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and is not part of kernel
 * core data, unregistering it will schedule work on all CPUs to make sure
 * that there are no more users. Depending on the load of the system that
 * may take a bit of time.
 *
 * Any private data attached must likewise not be freed while users may
 * still exist; if private data is attached to a ftrace_ops that is in
 * core code, the user of the ftrace_ops must perform a
 * schedule_on_each_cpu() before freeing it.
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops		*next;
	unsigned long			flags;
	void				*private;
171 ftrace_func_t saved_func;
172 int __percpu *disabled;
173 #ifdef CONFIG_DYNAMIC_FTRACE
174 struct ftrace_ops_hash local_hash;
175 struct ftrace_ops_hash *func_hash;
176 struct ftrace_ops_hash old_hash;
177 unsigned long trampoline;
	unsigned long			trampoline_size;
#endif
};
183 * Type of the current tracing.
185 enum ftrace_tracing_type_t {
186 FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
187 FTRACE_TYPE_RETURN, /* Hook the return of the function */
190 /* Current tracing type, default is FTRACE_TYPE_ENTER */
191 extern enum ftrace_tracing_type_t ftrace_tracing_type;
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
200 int register_ftrace_function(struct ftrace_ops *ops);
201 int unregister_ftrace_function(struct ftrace_ops *ops);
202 void clear_ftrace_function(void);
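/*
 * Minimal lifecycle following the note above (a sketch; "my_ops" and
 * "my_callback" are illustrative):
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 *	...
 *	ret = unregister_ftrace_function(&my_ops);
 *
 * Even after unregistering, my_ops.next may still be referenced
 * internally, so the ops must stay valid (hence static) and must not be
 * freed or reused right away.
 */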
205 * ftrace_function_local_enable - enable ftrace_ops on current cpu
 * This function enables tracing on the current cpu by decreasing
 * the per-cpu control variable.
209 * It must be called with preemption disabled and only on ftrace_ops
210 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
211 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
213 static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}
222 * ftrace_function_local_disable - disable ftrace_ops on current cpu
 * This function disables tracing on the current cpu by increasing
 * the per-cpu control variable.
226 * It must be called with preemption disabled and only on ftrace_ops
227 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
228 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
230 static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}
239 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 * This function returns the value of ftrace_ops::disabled for the current cpu.
243 * It must be called with preemption disabled and only on ftrace_ops
244 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
245 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
247 static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
249 WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
	return *this_cpu_ptr(ops->disabled);
}
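/*
 * Sketch of how the PER_CPU knobs fit together (illustrative): the
 * callback side checks the counter, while control code flips it with
 * preemption disabled:
 *
 *	if (ftrace_function_local_disabled(op))
 *		return;			(in the callback: skip this CPU)
 *
 *	preempt_disable();
 *	ftrace_function_local_disable(ops);
 *	...
 *	ftrace_function_local_enable(ops);
 *	preempt_enable();
 */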
253 extern void ftrace_stub(unsigned long a0, unsigned long a1,
254 struct ftrace_ops *op, struct pt_regs *regs);
256 #else /* !CONFIG_FUNCTION_TRACER */
258 * (un)register_ftrace_function must be a macro since the ops parameter
259 * must not be evaluated.
261 #define register_ftrace_function(ops) ({ 0; })
262 #define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
267 static inline void clear_ftrace_function(void) { }
268 static inline void ftrace_kill(void) { }
269 static inline void ftrace_free_init_mem(void) { }
270 #endif /* CONFIG_FUNCTION_TRACER */
272 #ifdef CONFIG_STACK_TRACER
274 #define STACK_TRACE_ENTRIES 500
278 extern unsigned stack_trace_index[];
279 extern struct stack_trace stack_trace_max;
280 extern unsigned long stack_trace_max_size;
281 extern arch_spinlock_t stack_trace_max_lock;
283 extern int stack_tracer_enabled;
284 void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
290 /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
291 DECLARE_PER_CPU(int, disable_stack_tracer);
294 * stack_tracer_disable - temporarily disable the stack tracer
 * There are a few locations (namely in RCU) where stack tracing
297 * cannot be executed. This function is used to disable stack
298 * tracing during those critical sections.
300 * This function must be called with preemption or interrupts
301 * disabled and stack_tracer_enable() must be called shortly after
302 * while preemption or interrupts are still disabled.
304 static inline void stack_tracer_disable(void)
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
308 WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
309 this_cpu_inc(disable_stack_tracer);
313 * stack_tracer_enable - re-enable the stack tracer
315 * After stack_tracer_disable() is called, stack_tracer_enable()
316 * must be called shortly afterward.
318 static inline void stack_tracer_enable(void)
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
321 WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
322 this_cpu_dec(disable_stack_tracer);
325 static inline void stack_tracer_disable(void) { }
326 static inline void stack_tracer_enable(void) { }
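/*
 * Typical pairing (a sketch): callers bracket a region that must not be
 * stack traced, keeping preemption disabled across both calls:
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */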
329 struct ftrace_func_command {
	struct list_head	list;
	char			*name;
332 int (*func)(struct ftrace_hash *hash,
333 char *func, char *cmd,
				char *params, int enable);
};
337 #ifdef CONFIG_DYNAMIC_FTRACE
339 int ftrace_arch_code_modify_prepare(void);
340 int ftrace_arch_code_modify_post_process(void);
enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
351 extern enum ftrace_bug_type ftrace_bug_type;
354 * Archs can set this to point to a variable that holds the value that was
355 * expected at the call site before calling ftrace_bug().
357 extern const void *ftrace_expected;
359 void ftrace_bug(int err, struct dyn_ftrace *rec);
363 extern int ftrace_text_reserved(const void *start, const void *end);
365 extern int ftrace_nr_registered_ops(void);
367 bool is_ftrace_trampoline(unsigned long addr);
370 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, from bit 0 up to FTRACE_REF_MAX, is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
375 * The second part is a mask:
376 * ENABLED - the function is being traced
377 * REGS - the record wants the function to save regs
378 * REGS_EN - the function is set up to save regs.
379 * IPMODIFY - the record allows for the IP address to be changed.
380 * DISABLED - the record is not ready to be touched yet
382 * When a new ftrace_ops is registered and wants a function to save
383 * pt_regs, the rec->flag REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
385 * starts saving regs it will do so until all ftrace_ops are removed
386 * from tracing that function.
389 FTRACE_FL_ENABLED = (1UL << 31),
390 FTRACE_FL_REGS = (1UL << 30),
391 FTRACE_FL_REGS_EN = (1UL << 29),
392 FTRACE_FL_TRAMP = (1UL << 28),
393 FTRACE_FL_TRAMP_EN = (1UL << 27),
394 FTRACE_FL_IPMODIFY = (1UL << 26),
395 FTRACE_FL_DISABLED = (1UL << 25),
398 #define FTRACE_REF_MAX_SHIFT 25
399 #define FTRACE_FL_BITS 7
400 #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
401 #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
402 #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
404 #define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK)
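/*
 * Worked example of the split described above, assuming 32-bit flags:
 *
 *	bit:	31 30 29 28 27 26 25 | 24 ..................... 0
 *		EN RG RE TR TE IP DI |    reference counter
 *
 * FTRACE_FL_MASK covers the top FTRACE_FL_BITS (7) bits, so
 * ftrace_rec_count(rec) masks them off and yields how many callbacks
 * (up to FTRACE_REF_MAX) have registered this function.
 */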
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};
412 int ftrace_force_update(void);
413 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
414 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
419 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
420 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
421 void ftrace_free_filter(struct ftrace_ops *ops);
422 void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
424 int register_ftrace_command(struct ftrace_func_command *cmd);
425 int unregister_ftrace_command(struct ftrace_func_command *cmd);
428 FTRACE_UPDATE_CALLS = (1 << 0),
429 FTRACE_DISABLE_CALLS = (1 << 1),
430 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
431 FTRACE_START_FUNC_RET = (1 << 3),
432 FTRACE_STOP_FUNC_RET = (1 << 4),
436 * The FTRACE_UPDATE_* enum is used to pass information back
437 * from the ftrace_update_record() and ftrace_test_record()
438 * functions. These are called by the code update routines
439 * to find out what is to be done for a given function.
441 * IGNORE - The function is already what we want it to be
442 * MAKE_CALL - Start tracing the function
 * MODIFY_CALL - The call site needs to switch to a different caller
 *               (e.g. to start or stop saving regs)
444 * MAKE_NOP - Stop tracing the function
447 FTRACE_UPDATE_IGNORE,
448 FTRACE_UPDATE_MAKE_CALL,
449 FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};
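/*
 * Sketch of how a code update routine consumes these return values
 * (this mirrors the pattern described above; error handling elided):
 *
 *	switch (ftrace_update_record(rec, enable)) {
 *	case FTRACE_UPDATE_IGNORE:
 *		return 0;
 *	case FTRACE_UPDATE_MAKE_CALL:
 *		return ftrace_make_call(rec, ftrace_get_addr_new(rec));
 *	case FTRACE_UPDATE_MAKE_NOP:
 *		return ftrace_make_nop(NULL, rec, ftrace_get_addr_curr(rec));
 *	case FTRACE_UPDATE_MODIFY_CALL:
 *		return ftrace_modify_call(rec, ftrace_get_addr_curr(rec),
 *					  ftrace_get_addr_new(rec));
 *	}
 */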
454 FTRACE_ITER_FILTER = (1 << 0),
455 FTRACE_ITER_NOTRACE = (1 << 1),
456 FTRACE_ITER_PRINTALL = (1 << 2),
457 FTRACE_ITER_DO_HASH = (1 << 3),
458 FTRACE_ITER_HASH = (1 << 4),
459 FTRACE_ITER_ENABLED = (1 << 5),
462 void arch_ftrace_update_code(int command);
464 struct ftrace_rec_iter;
466 struct ftrace_rec_iter *ftrace_rec_iter_start(void);
467 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
468 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
470 #define for_ftrace_rec_iter(iter) \
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
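/*
 * Example use of the record iterator (a sketch), e.g. from arch code
 * that needs to walk every mcount call site:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...
 *	}
 */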
476 int ftrace_update_record(struct dyn_ftrace *rec, int enable);
477 int ftrace_test_record(struct dyn_ftrace *rec, int enable);
478 void ftrace_run_stop_machine(int command);
479 unsigned long ftrace_location(unsigned long ip);
480 unsigned long ftrace_location_range(unsigned long start, unsigned long end);
481 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
482 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
484 extern ftrace_func_t ftrace_trace_function;
486 int ftrace_regex_open(struct ftrace_ops *ops, int flag,
487 struct inode *inode, struct file *file);
488 ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
489 size_t cnt, loff_t *ppos);
490 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
491 size_t cnt, loff_t *ppos);
492 int ftrace_regex_release(struct inode *inode, struct file *file);
void
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
497 /* defined in arch */
498 extern int ftrace_ip_converted(unsigned long ip);
499 extern int ftrace_dyn_arch_init(void);
500 extern void ftrace_replace_code(int enable);
501 extern int ftrace_update_ftrace_func(ftrace_func_t func);
502 extern void ftrace_caller(void);
503 extern void ftrace_regs_caller(void);
504 extern void ftrace_call(void);
505 extern void ftrace_regs_call(void);
506 extern void mcount_call(void);
508 void ftrace_modify_all_code(int command);
511 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
514 #ifndef FTRACE_GRAPH_ADDR
515 #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
518 #ifndef FTRACE_REGS_ADDR
519 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
520 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
522 # define FTRACE_REGS_ADDR FTRACE_ADDR
527 * If an arch would like functions that are only traced
528 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
530 * to be that address to jump to.
532 #ifndef FTRACE_GRAPH_TRAMP_ADDR
533 #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
536 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
537 extern void ftrace_graph_caller(void);
538 extern int ftrace_enable_ftrace_graph_caller(void);
539 extern int ftrace_disable_ftrace_graph_caller(void);
541 static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
542 static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
546 * ftrace_make_nop - convert code into nop
547 * @mod: module structure if called by module load initialization
548 * @rec: the mcount call site record
549 * @addr: the address that the call site should be calling
551 * This is a very sensitive operation and great care needs
552 * to be taken by the arch. The operation should carefully
553 * read the location, check to see if what is read is indeed
554 * what we expect it to be, and then on success of the compare,
555 * it should write to the location.
557 * The code segment at @rec->ip should be a caller to @addr
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
562 * -EINVAL on a failed compare of the contents
563 * -EPERM on error writing to the location
564 * Any other value will be considered a failure.
566 extern int ftrace_make_nop(struct module *mod,
567 struct dyn_ftrace *rec, unsigned long addr);
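/*
 * A sketch of the read/compare/write pattern an arch implementation
 * typically follows; "build_call_insn", "patch_insn" and "arch_nop" are
 * hypothetical helpers, only the error contract above is prescribed:
 *
 *	unsigned char old[MCOUNT_INSN_SIZE], expect[MCOUNT_INSN_SIZE];
 *
 *	if (probe_kernel_read(old, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	build_call_insn(expect, rec->ip, addr);
 *	if (memcmp(old, expect, MCOUNT_INSN_SIZE))
 *		return -EINVAL;
 *	if (patch_insn((void *)rec->ip, arch_nop(), MCOUNT_INSN_SIZE))
 *		return -EPERM;
 *	return 0;
 */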
570 * ftrace_make_call - convert a nop call site into a call to addr
571 * @rec: the mcount call site record
572 * @addr: the address that the call site should call
574 * This is a very sensitive operation and great care needs
575 * to be taken by the arch. The operation should carefully
576 * read the location, check to see if what is read is indeed
577 * what we expect it to be, and then on success of the compare,
578 * it should write to the location.
580 * The code segment at @rec->ip should be a nop
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
585 * -EINVAL on a failed compare of the contents
586 * -EPERM on error writing to the location
587 * Any other value will be considered a failure.
589 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
591 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
593 * ftrace_modify_call - convert from one addr to another (no nop)
594 * @rec: the mcount call site record
595 * @old_addr: the address expected to be currently called to
596 * @addr: the address to change to
598 * This is a very sensitive operation and great care needs
599 * to be taken by the arch. The operation should carefully
600 * read the location, check to see if what is read is indeed
601 * what we expect it to be, and then on success of the compare,
602 * it should write to the location.
604 * The code segment at @rec->ip should be a caller to @old_addr
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
609 * -EINVAL on a failed compare of the contents
610 * -EPERM on error writing to the location
611 * Any other value will be considered a failure.
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
616 /* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
624 /* May be defined in arch */
625 extern int ftrace_arch_read_dyn_info(char *buf, int size);
627 extern int skip_trace(unsigned long ip);
628 extern void ftrace_module_init(struct module *mod);
629 extern void ftrace_module_enable(struct module *mod);
630 extern void ftrace_release_mod(struct module *mod);
632 extern void ftrace_disable_daemon(void);
633 extern void ftrace_enable_daemon(void);
634 #else /* CONFIG_DYNAMIC_FTRACE */
635 static inline int skip_trace(unsigned long ip) { return 0; }
636 static inline int ftrace_force_update(void) { return 0; }
637 static inline void ftrace_disable_daemon(void) { }
638 static inline void ftrace_enable_daemon(void) { }
639 static inline void ftrace_module_init(struct module *mod) { }
640 static inline void ftrace_module_enable(struct module *mod) { }
641 static inline void ftrace_release_mod(struct module *mod) { }
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}
 * Again, users of functions that have ftrace_ops may not
661 * have them defined when ftrace is not enabled, but these
662 * functions may still be called. Use a macro instead of inline.
664 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
665 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
666 #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
667 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
668 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
669 #define ftrace_free_filter(ops) do { } while (0)
670 #define ftrace_ops_set_global_filter(ops) do { } while (0)
672 static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
673 size_t cnt, loff_t *ppos) { return -ENODEV; }
674 static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
675 size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
683 #endif /* CONFIG_DYNAMIC_FTRACE */
685 /* totally disable ftrace - can not re-enable after this */
686 void ftrace_kill(void);
static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
696 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}
static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
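/*
 * Typical use (a sketch; the caller supplies its own serialization per
 * the note above):
 *
 *	int saved = __ftrace_enabled_save();
 *	...
 *	__ftrace_enabled_restore(saved);
 */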
718 /* All archs should have this, but we define it for consistency */
719 #ifndef ftrace_return_address0
720 # define ftrace_return_address0 __builtin_return_address(0)
723 /* Archs may use other ways for ADDR1 and beyond */
724 #ifndef ftrace_return_address
725 # ifdef CONFIG_FRAME_POINTER
726 # define ftrace_return_address(n) __builtin_return_address(n)
728 # define ftrace_return_address(n) 0UL
732 #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
733 #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
734 #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
735 #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
736 #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
737 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
738 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}
752 #ifdef CONFIG_IRQSOFF_TRACER
753 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
754 extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
756 static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
757 static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
760 #ifdef CONFIG_PREEMPT_TRACER
761 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
762 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
765 * Use defines instead of static inlines because some arches will make code out
766 * of the CALLER_ADDR, when we really want these to be a real nop.
768 # define trace_preempt_on(a0, a1) do { } while (0)
769 # define trace_preempt_off(a0, a1) do { } while (0)
772 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
773 extern void ftrace_init(void);
775 static inline void ftrace_init(void) { }
779 * Structure that defines an entry function trace.
780 * It's already packed but the attribute "packed" is needed
781 * to remove extra padding at the end.
783 struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;
789 * Structure that defines a return function trace.
790 * It's already packed but the attribute "packed" is needed
791 * to remove extra padding at the end.
793 struct ftrace_graph_ret {
794 unsigned long func; /* Current function */
795 /* Number of functions that overran the depth limit for current task */
796 unsigned long overrun;
797 unsigned long long calltime;
	unsigned long long rettime;
	int depth;
} __packed;
/* Type of the callback handlers for tracing function graph */
803 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
804 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
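/*
 * Sketch of a matching handler pair (names illustrative); the entry
 * handler returns nonzero to trace the function, 0 to skip it:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		u64 delta = trace->rettime - trace->calltime;
 *		...
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */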
806 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
809 #define INIT_FTRACE_GRAPH .ret_stack = NULL,
812 * Stack of return addresses for functions
814 * Used in struct thread_info
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};
832 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
834 * Defined in entry_32/64.S
836 extern void return_to_handler(void);
extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
840 unsigned long frame_pointer, unsigned long *retp);
842 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
843 unsigned long ret, unsigned long *retp);
846 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the usual function
 * tracer if the function graph tracer is not configured.
850 #define __notrace_funcgraph notrace
852 #define FTRACE_NOTRACE_DEPTH 65536
853 #define FTRACE_RETFUNC_DEPTH 50
854 #define FTRACE_RETSTACK_ALLOC_SIZE 32
855 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
856 trace_func_graph_ent_t entryfunc);
858 extern bool ftrace_graph_is_dead(void);
859 extern void ftrace_graph_stop(void);
861 /* The current handlers in use */
862 extern trace_func_graph_ret_t ftrace_graph_return;
863 extern trace_func_graph_ent_t ftrace_graph_entry;
865 extern void unregister_ftrace_graph(void);
867 extern void ftrace_graph_init_task(struct task_struct *t);
868 extern void ftrace_graph_exit_task(struct task_struct *t);
869 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
871 static inline int task_curr_ret_stack(struct task_struct *t)
873 return t->curr_ret_stack;
876 static inline void pause_graph_tracing(void)
	atomic_inc(&current->tracing_graph_pause);
881 static inline void unpause_graph_tracing(void)
	atomic_dec(&current->tracing_graph_pause);
885 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
887 #define __notrace_funcgraph
888 #define INIT_FTRACE_GRAPH
890 static inline void ftrace_graph_init_task(struct task_struct *t) { }
891 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
892 static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
894 static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
899 static inline void unregister_ftrace_graph(void) { }
static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}
906 static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}
913 static inline void pause_graph_tracing(void) { }
914 static inline void unpause_graph_tracing(void) { }
915 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
917 #ifdef CONFIG_TRACING
919 /* flags for current->trace */
921 TSK_TRACE_FL_TRACE_BIT = 0,
922 TSK_TRACE_FL_GRAPH_BIT = 1,
925 TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
926 TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
929 static inline void set_tsk_trace_trace(struct task_struct *tsk)
931 set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
934 static inline void clear_tsk_trace_trace(struct task_struct *tsk)
936 clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
939 static inline int test_tsk_trace_trace(struct task_struct *tsk)
941 return tsk->trace & TSK_TRACE_FL_TRACE;
944 static inline void set_tsk_trace_graph(struct task_struct *tsk)
946 set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
949 static inline void clear_tsk_trace_graph(struct task_struct *tsk)
951 clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
954 static inline int test_tsk_trace_graph(struct task_struct *tsk)
956 return tsk->trace & TSK_TRACE_FL_GRAPH;
959 enum ftrace_dump_mode;
961 extern enum ftrace_dump_mode ftrace_dump_on_oops;
962 extern int tracepoint_printk;
964 extern void disable_trace_on_warning(void);
965 extern int __disable_trace_on_warning;
967 #ifdef CONFIG_PREEMPT
968 #define INIT_TRACE_RECURSION .trace_recursion = 0,
971 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos);
975 #else /* CONFIG_TRACING */
976 static inline void disable_trace_on_warning(void) { }
977 #endif /* CONFIG_TRACING */
979 #ifndef INIT_TRACE_RECURSION
980 #define INIT_TRACE_RECURSION
983 #ifdef CONFIG_FTRACE_SYSCALLS
985 unsigned long arch_syscall_addr(int nr);
987 #endif /* CONFIG_FTRACE_SYSCALLS */
989 #endif /* _LINUX_FTRACE_H */