/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>
/* If the tracing code itself misbehaves, warn and shut ftrace down */
#define FTRACE_WARN_ON(cond)                    \
        do {                                    \
                if (WARN_ON(cond))              \
                        ftrace_kill();          \
        } while (0)

#define FTRACE_WARN_ON_ONCE(cond)               \
        do {                                    \
                if (WARN_ON_ONCE(cond))         \
                        ftrace_kill();          \
        } while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* When ftrace_pid_trace >= 0, only threads with that pid are traced */
static int ftrace_pid_trace = -1;

/* Quick disabling of the function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;
static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        };
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
        if (current->pid != ftrace_pid_trace)
                return;

        ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and the
 * moment tracing actually stops.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                ftrace_func_t func;

                if (ops->next == &ftrace_list_end)
                        func = ops->func;
                else
                        func = ftrace_list_func;

                if (ftrace_pid_trace >= 0) {
                        set_ftrace_pid_function(func);
                        func = ftrace_pid_func;
                }

                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                ftrace_trace_function = func;
#else
                __ftrace_trace_function = func;
                ftrace_trace_function = ftrace_test_stop_func;
#endif
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list->next == &ftrace_list_end) {
                        ftrace_func_t func = ftrace_list->func;

                        if (ftrace_pid_trace >= 0) {
                                set_ftrace_pid_function(func);
                                func = ftrace_pid_func;
                        }
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                        ftrace_trace_function = func;
#else
                        __ftrace_trace_function = func;
#endif
                }
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}
static void ftrace_update_pid_func(void)
{
        ftrace_func_t func;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        if (ftrace_trace_function == ftrace_stub)
                goto out;

        func = ftrace_trace_function;

        if (ftrace_pid_trace >= 0) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        } else {
                if (func == ftrace_pid_func)
                        func = ftrace_pid_function;
        }

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
#else
        __ftrace_trace_function = func;
#endif

 out:
        spin_unlock(&ftrace_lock);
}
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;
enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
        FTRACE_START_FUNC_RET           = (1 << 5),
        FTRACE_STOP_FUNC_RET            = (1 << 6),
};
static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
        ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
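/*
 * Worked example (a sketch; the exact numbers are arch-dependent):
 * assuming 4K pages, a 16-byte struct ftrace_page header (next + index
 * on 64-bit) and a 32-byte struct dyn_ftrace (ip, flags and a
 * list_head), ENTRIES_PER_PAGE = (4096 - 16) / 32 = 127 records fit
 * in each page of the table below.
 */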
/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page      *ftrace_pages_start;
static struct ftrace_page      *ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
        int i;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        if ((rec->ip >= s) && (rec->ip < e))
                                ftrace_free_rec(rec);
                }
        }
        spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        FTRACE_WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next) {
                        /* allocate another page */
                        ftrace_pages->next =
                                (void *)get_zeroed_page(GFP_KERNEL);
                        if (!ftrace_pages->next)
                                return NULL;
                }
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;

        list_add(&rec->list, &ftrace_new_addrs);

        return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static void ftrace_bug(int failed, unsigned long ip)
{
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
                print_ip_sym(ip);
                break;
        case -EINVAL:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
                printk(KERN_CONT "\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
                print_ip_sym(ip);
                break;
        default:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
        unsigned long fl;
        unsigned long ftrace_addr;

        ftrace_addr = (unsigned long)ftrace_caller;

        /*
         * If this record is not to be traced and
         * it is not enabled then do nothing.
         *
         * If this record is not to be traced and
         * it is enabled then disable it.
         */
        if (rec->flags & FTRACE_FL_NOTRACE) {
                if (rec->flags & FTRACE_FL_ENABLED)
                        rec->flags &= ~FTRACE_FL_ENABLED;
                else
                        return 0;

        } else if (ftrace_filtered && enable) {
                /* Filtering is on */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                /* Record is filtered and enabled, do nothing */
                if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
                        return 0;

                /* Record is not filtered and is not enabled, do nothing */
                if (!fl)
                        return 0;

                /* Record is not filtered but enabled, disable it */
                if (fl == FTRACE_FL_ENABLED)
                        rec->flags &= ~FTRACE_FL_ENABLED;
                else
                /* Otherwise record is filtered but not enabled, enable it */
                        rec->flags |= FTRACE_FL_ENABLED;
        } else {
                /* Disable or not filtered */

                if (enable) {
                        /* if record is enabled, do nothing */
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;

                        rec->flags |= FTRACE_FL_ENABLED;

                } else {
                        /* if record is not enabled, do nothing */
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;

                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        if (rec->flags & FTRACE_FL_ENABLED)
                return ftrace_make_call(rec, ftrace_addr);
        else
                return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
        int i, failed;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /*
                         * Skip over free records and records that have
                         * failed.
                         */
                        if (rec->flags & FTRACE_FL_FREE ||
                            rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_free_rec(rec);
                                } else
                                        ftrace_bug(failed, rec->ip);
                        }
                }
        }
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
        int ret;

        ret = ftrace_make_nop(mod, rec, mcount_addr);
        if (ret) {
                ftrace_bug(ret, rec->ip);
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}
static int __ftrace_modify_code(void *data)
{
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_START_FUNC_RET)
                ftrace_enable_ftrace_graph_caller();
        else if (*command & FTRACE_STOP_FUNC_RET)
                ftrace_disable_ftrace_graph_caller();

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}
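/*
 * Usage note (sketch): callers OR the FTRACE_* command bits together,
 * so a single stop_machine() pass can both patch the call sites and
 * swap the trace callback, e.g.:
 *
 *      ftrace_run_update_code(FTRACE_ENABLE_CALLS |
 *                             FTRACE_UPDATE_TRACE_FUNC);
 *
 * This is exactly what ftrace_startup_enable() below ends up doing.
 */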
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                return;

        ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up++;
        command |= FTRACE_ENABLE_CALLS;

        ftrace_startup_enable(command);

        mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown(int command)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up--;
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}
static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
        struct dyn_ftrace *p, *t;
        cycle_t start, stop;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
                        return -1;

                list_del_init(&p->list);

                /* convert record (i.e, patch mcount-call with NOP) */
                if (ftrace_code_disable(mod, p)) {
                        p->flags |= FTRACE_FL_CONVERTED;
                        ftrace_update_cnt++;
                } else
                        ftrace_free_rec(p);
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld entries in %d pages\n",
                num_to_init, cnt + 1);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}
enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;

        if (*pos > 0) {
                if (iter->idx < 0)
                        return p;
                (*pos)--;
                iter->idx--;
        }

        p = t_next(m, p, pos);

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}
static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}
enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}
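/*
 * Examples of the glob forms handled above (illustrative only):
 *
 *      "schedule"      MATCH_FULL         exact symbol name
 *      "sched_*"       MATCH_FRONT_ONLY   prefix match
 *      "*_lock"        MATCH_END_ONLY     suffix match
 *      "*spin*"        MATCH_MIDDLE_ONLY  substring match
 */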
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftrace_start_lock);
        if (ftrace_start_up && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftrace_start_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
        struct dentry *entry;

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        return 0;
}
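/*
 * Example use of the files created above (a sketch, assuming debugfs
 * is mounted at /sys/kernel/debug and d_tracer is its tracing dir):
 *
 *      # cat available_filter_functions       # every traceable function
 *      # echo 'sched_*' > set_ftrace_filter   # trace only sched_* functions
 *      # echo sys_read >> set_ftrace_notrace  # append: also skip sys_read
 *      # cat failures                          # sites ftrace failed to patch
 *
 * Note the O_APPEND handling in ftrace_regex_open(): '>' resets the
 * current filter set first, '>>' adds to it.
 */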
static int ftrace_convert_nops(struct module *mod,
                               unsigned long *start,
                               unsigned long *end)
{
        unsigned long *p;
        unsigned long addr;
        unsigned long flags;

        mutex_lock(&ftrace_start_lock);
        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
                /*
                 * Some architecture linkers will pad between
                 * the different mcount_loc sections of different
                 * object files to satisfy alignments.
                 * Skip any NULL pointers.
                 */
                if (!addr)
                        continue;
                ftrace_record_ip(addr);
        }

        /* disable interrupts to prevent kstop machine */
        local_irq_save(flags);
        ftrace_update_code(mod);
        local_irq_restore(flags);
        mutex_unlock(&ftrace_start_lock);

        return 0;
}
void ftrace_init_module(struct module *mod,
                        unsigned long *start, unsigned long *end)
{
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
        unsigned long count, addr, flags;
        int ret;

        /* Keep the ftrace pointer to the stub */
        addr = (unsigned long)ftrace_stub;

        local_irq_save(flags);
        ftrace_dyn_arch_init(&addr);
        local_irq_restore(flags);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                goto failed;

        count = __stop_mcount_loc - __start_mcount_loc;

        ret = ftrace_dyn_table_alloc(count);
        if (ret)
                goto failed;

        last_ftrace_enabled = ftrace_enabled = 1;

        ret = ftrace_convert_nops(NULL,
                                  __start_mcount_loc,
                                  __stop_mcount_loc);

        return;
 failed:
        ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
        ftrace_enabled = 1;
        return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)        do { } while (0)
# define ftrace_shutdown(command)       do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        int r;

        if (ftrace_pid_trace >= 0)
                r = sprintf(buf, "%d\n", ftrace_pid_trace);
        else
                r = sprintf(buf, "no pid\n");

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
                 size_t cnt, loff_t *ppos)
{
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        mutex_lock(&ftrace_start_lock);
        if (val < 0) {
                /* disable pid tracing */
                if (ftrace_pid_trace < 0)
                        goto out;
                ftrace_pid_trace = -1;

        } else {

                if (ftrace_pid_trace == val)
                        goto out;

                ftrace_pid_trace = val;
        }

        /* update the function call */
        ftrace_update_pid_func();
        ftrace_startup_enable(0);

 out:
        mutex_unlock(&ftrace_start_lock);

        return cnt;
}
static struct file_operations ftrace_pid_fops = {
        .read = ftrace_pid_read,
        .write = ftrace_pid_write,
};
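/*
 * Example (sketch): limiting the function tracer to one task through
 * the set_ftrace_pid file registered below:
 *
 *      # echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *      # echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid
 *
 * A negative value disables pid filtering again, matching the
 * "val < 0" branch in ftrace_pid_write() above.
 */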
static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        ftrace_init_dyn_debugfs(d_tracer);

        entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
                                    NULL, &ftrace_pid_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_pid' entry\n");
        return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it takes no locks, so it is safe to
 * call even from atomic context.
 */
void ftrace_kill(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
        clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);

        ret = __register_ftrace_function(ops);
        ftrace_startup(0);

        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown(0);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
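/*
 * Example (a minimal sketch, not part of this file): a caller that
 * counts every traced function call. The names my_hits, my_trace_func
 * and my_ops are hypothetical; note the callback is marked notrace to
 * avoid the recursion warned about above.
 *
 *      static atomic_t my_hits = ATOMIC_INIT(0);
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip)
 *      {
 *              atomic_inc(&my_hits);
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */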
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}
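/*
 * Example (sketch): this handler backs the kernel.ftrace_enabled
 * sysctl, so tracing can be toggled from user space:
 *
 *      # echo 0 > /proc/sys/kernel/ftrace_enabled      # stub out callbacks
 *      # echo 1 > /proc/sys/kernel/ftrace_enabled      # restore them
 */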
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
                        (trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry =
                        (trace_func_graph_ent_t)ftrace_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
        int i;
        int ret = 0;
        unsigned long flags;
        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
        struct task_struct *g, *t;

        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
                ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
                                        * sizeof(struct ftrace_ret_stack),
                                        GFP_KERNEL);
                if (!ret_stack_list[i]) {
                        start = 0;
                        end = i;
                        ret = -ENOMEM;
                        goto free;
                }
        }

        read_lock_irqsave(&tasklist_lock, flags);
        do_each_thread(g, t) {
                if (start == end) {
                        ret = -EAGAIN;
                        goto unlock;
                }

                if (t->ret_stack == NULL) {
                        t->curr_ret_stack = -1;
                        /* Make sure IRQs see the -1 first: */
                        barrier();
                        t->ret_stack = ret_stack_list[start++];
                        atomic_set(&t->trace_overrun, 0);
                }
        } while_each_thread(g, t);

unlock:
        read_unlock_irqrestore(&tasklist_lock, flags);
free:
        for (i = start; i < end; i++)
                kfree(ret_stack_list[i]);
        return ret;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
        struct ftrace_ret_stack **ret_stack_list;
        int ret;

        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
                                sizeof(struct ftrace_ret_stack *),
                                GFP_KERNEL);

        if (!ret_stack_list)
                return -ENOMEM;

        do {
                ret = alloc_retstack_tasklist(ret_stack_list);
        } while (ret == -EAGAIN);

        kfree(ret_stack_list);
        return ret;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                          trace_func_graph_ent_t entryfunc)
{
        int ret = 0;

        mutex_lock(&ftrace_sysctl_lock);

        atomic_inc(&ftrace_graph_active);
        ret = start_graph_tracing();
        if (ret) {
                atomic_dec(&ftrace_graph_active);
                goto out;
        }

        ftrace_graph_return = retfunc;
        ftrace_graph_entry = entryfunc;

        ftrace_startup(FTRACE_START_FUNC_RET);

out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}
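/*
 * Example (sketch): a tracer enables graph tracing by handing over an
 * entry and a return callback; my_graph_entry and my_graph_return are
 * hypothetical functions with the trace_func_graph_ent_t and
 * trace_func_graph_ret_t signatures:
 *
 *      ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *      if (!ret) {
 *              ... tracing runs until unregister_ftrace_graph(); ...
 *      }
 */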
void unregister_ftrace_graph(void)
{
        mutex_lock(&ftrace_sysctl_lock);

        atomic_dec(&ftrace_graph_active);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);

        mutex_unlock(&ftrace_sysctl_lock);
}
/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
        if (atomic_read(&ftrace_graph_active)) {
                t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);
                if (!t->ret_stack)
                        return;
                t->curr_ret_stack = -1;
                atomic_set(&t->trace_overrun, 0);
        } else
                t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
        struct ftrace_ret_stack *ret_stack = t->ret_stack;

        t->ret_stack = NULL;
        /* NULL must become visible to IRQs before we free it: */
        barrier();

        kfree(ret_stack);
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */