/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
                       char *name, int offset, int size, int is_signed)
{
        struct ftrace_event_field *field;

        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;

        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;

        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;
        list_add(&field->link, &call->fields);

        return 0;

err:
        if (field) {
                kfree(field->name);
                kfree(field->type);
        }
        kfree(field);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

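/*
 * Illustrative use (not from this file): an event's define_fields()
 * callback registers each field of its entry record, e.g.
 *
 *      ret = trace_define_field(event_call, "pid_t", "next_pid",
 *                               offsetof(struct my_entry, next_pid),
 *                               sizeof(pid_t), 1);
 *
 * "struct my_entry" is a hypothetical entry layout; real callers are
 * generated by the TRACE_EVENT() macros.
 */
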
#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
        struct ftrace_event_field *field, *next;

        list_for_each_entry_safe(field, next, &call->fields, link) {
                list_del(&field->link);
                kfree(field->type);
                kfree(field->name);
                kfree(field);
        }
}

#endif /* CONFIG_MODULES */

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
                                        int enable)
{
        switch (enable) {
        case 0:
                if (call->enabled) {
                        call->enabled = 0;
                        tracing_stop_cmdline_record();
                        call->unregfunc();
                }
                break;
        case 1:
                if (!call->enabled) {
                        call->enabled = 1;
                        tracing_start_cmdline_record();
                        call->regfunc();
                }
                break;
        }
}

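/*
 * Note: cmdline recording keeps the pid -> comm mapping up to date while
 * any event is active, so trace output can print task names instead of
 * bare pids.
 */
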
static void ftrace_clear_events(void)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                ftrace_event_enable_disable(call, 0);
        }
        mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
                                  const char *event, int set)
{
        struct ftrace_event_call *call;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {

                if (!call->name || !call->regfunc)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(call, set);

                ret = 0;
        }
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        return __ftrace_set_clr_event(match, sub, event, set);
}

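/*
 * Illustrative usage from user space (paths assume debugfs mounted at
 * /sys/kernel/debug):
 *
 *      echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *      echo 'irq:*' >> /sys/kernel/debug/tracing/set_event
 *      echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' clears the matching events (see ftrace_event_write()).
 */
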
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        return __ftrace_set_clr_event(NULL, system, event, set);
}

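/*
 * Example (hypothetical in-kernel caller): enable every event in the
 * "sched" subsystem:
 *
 *      ret = trace_set_clr_event("sched", NULL, 1);
 */
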
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        size_t read = 0;
        int i, set = 1;
        ssize_t ret;
        char *buf;
        char ch;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        ret = get_user(ch, ubuf++);
        if (ret)
                return ret;
        read++;
        cnt--;

        /* skip white space */
        while (cnt && isspace(ch)) {
                ret = get_user(ch, ubuf++);
                if (ret)
                        return ret;
                read++;
                cnt--;
        }

        /* Only white space found? */
        if (isspace(ch)) {
                file->f_pos += read;
                return read;
        }

        buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (cnt > EVENT_BUF_SIZE)
                cnt = EVENT_BUF_SIZE;

        i = 0;
        while (cnt && !isspace(ch)) {
                /* a leading '!' means "clear" instead of "set" */
                if (!i && ch == '!')
                        set = 0;
                else
                        buf[i++] = ch;

                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out_free;
                read++;
                cnt--;
        }
        buf[i] = 0;

        file->f_pos += read;

        ret = ftrace_set_clr_event(buf, set);
        if (ret)
                goto out_free;

        ret = read;

 out_free:
        kfree(buf);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct list_head *list = m->private;
        struct ftrace_event_call *call;

        (*pos)++;

 retry:
        if (list == &ftrace_events)
                return NULL;

        call = list_entry(list, struct ftrace_event_call, list);

        /*
         * The ftrace subsystem is for showing formats only.
         * They can not be enabled or disabled via the event files.
         */
        if (!call->regfunc) {
                list = list->next;
                goto retry;
        }

        m->private = list->next;

        return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&event_mutex);
        m->private = ftrace_events.next;
        return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct list_head *list = m->private;
        struct ftrace_event_call *call;

        (*pos)++;

 retry:
        if (list == &ftrace_events)
                return NULL;

        call = list_entry(list, struct ftrace_event_call, list);

        if (!call->enabled) {
                list = list->next;
                goto retry;
        }

        m->private = list->next;

        return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&event_mutex);
        m->private = ftrace_events.next;
        return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = v;

        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops;

        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_clear_events();

        seq_ops = inode->i_private;
        return seq_open(file, seq_ops);
}

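/*
 * Opening set_event for write without O_APPEND behaves like truncation:
 * all events are cleared first, so 'echo foo > set_event' replaces the
 * current set while 'echo foo >> set_event' adds to it.
 */
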
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;

        if (call->enabled)
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        unsigned long val;
        char buf[64];
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                mutex_lock(&event_mutex);
                ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;
        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        const char *system = filp->private_data;
        struct ftrace_event_call *call;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!call->name || !call->regfunc)
                        continue;

                if (system && strcmp(call->system, system) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set
                 * or if all events are cleared, or if we have
                 * a mixture.
                 */
                set |= (1 << !!call->enabled);

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

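/*
 * The 'set' bitmask above encodes what was seen: bit 0 means at least one
 * disabled event was found, bit 1 at least one enabled event.  So
 * 1 -> '0' (all off), 2 -> '1' (all on), 3 -> 'X' (mixture), and
 * 0 -> '?' (no events matched).
 */
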
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        const char *system = filp->private_data;
        unsigned long val;
        char buf[64];
        ssize_t ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        ret = __ftrace_set_clr_event(NULL, system, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)                                               \
        sizeof(type) != sizeof(field.name) ? __bad_type_size() :       \
        #type, "common_" #name, offsetof(typeof(field), name),         \
                sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
        struct trace_entry field;

        /* struct trace_entry */
        return trace_seq_printf(s,
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\n",
                                FIELD(unsigned short, type),
                                FIELD(unsigned char, flags),
                                FIELD(unsigned char, preempt_count),
                                FIELD(int, pid),
                                FIELD(int, tgid));
}

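/*
 * The resulting common header in each event's "format" file looks
 * roughly like this (offsets and sizes depend on the actual layout of
 * struct trace_entry on the build):
 *
 *      field:unsigned short common_type;       offset:0;  size:2;
 *      field:unsigned char common_flags;       offset:2;  size:1;
 *      field:unsigned char common_preempt_count; offset:3; size:1;
 *      field:int common_pid;   offset:4;  size:4;
 *      field:int common_tgid;  offset:8;  size:4;
 */
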
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        char *buf;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        /* If any of the first writes fail, so will the show_format. */

        trace_seq_printf(s, "name: %s\n", call->name);
        trace_seq_printf(s, "ID: %d\n", call->id);
        trace_seq_printf(s, "format:\n");
        trace_write_header(s);

        r = call->show_format(s);
        if (!r) {
                /*
                 * ug! The format output is bigger than a PAGE!!
                 */
                buf = "FORMAT TOO BIG\n";
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                            buf, strlen(buf));
                goto out;
        }

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
 out:
        kfree(s);
        return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", call->id);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
        kfree(s);
        return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_event_filter(call, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_event_filter(call, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

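/*
 * Illustrative filter written from user space (field names are event
 * specific; consult the event's "format" file):
 *
 *      echo 'prev_pid != 0 && next_prio < 100' > \
 *              /sys/kernel/debug/tracing/events/sched/sched_switch/filter
 */
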
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_subsystem_event_filter(system, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = tracing_open_generic,
        .read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
        .open = tracing_open_generic,
        .read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = tracing_open_generic,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
        .open = tracing_open_generic,
        .read = system_enable_read,
        .write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
        static struct dentry *d_tracer;
        static struct dentry *d_events;

        if (d_events)
                return d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return NULL;

        d_events = debugfs_create_dir("events", d_tracer);
        if (!d_events)
                pr_warning("Could not create debugfs "
                           "'events' directory\n");

        return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0)
                        return system->entry;
        }

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system) {
                pr_warning("No memory to create event subsystem %s\n",
                           name);
                return d_events;
        }

        system->entry = debugfs_create_dir(name, d_events);
        if (!system->entry) {
                pr_warning("Could not create event subsystem %s\n",
                           name);
                kfree(system);
                return d_events;
        }

        system->name = kstrdup(name, GFP_KERNEL);
        if (!system->name) {
                debugfs_remove(system->entry);
                kfree(system);
                return d_events;
        }

        list_add(&system->list, &event_subsystems);

        system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
        if (!system->filter) {
                pr_warning("Could not allocate filter for subsystem "
                           "'%s'\n", name);
                return system->entry;
        }

        entry = debugfs_create_file("filter", 0644, system->entry, system,
                                    &ftrace_subsystem_filter_fops);
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
                pr_warning("Could not create debugfs "
                           "'%s/filter' entry\n", name);
        }

        entry = trace_create_file("enable", 0644, system->entry,
                                  (void *)system->name,
                                  &ftrace_system_enable_fops);

        return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
        struct dentry *entry;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                d_events = event_subsystem_dir(call->system, d_events);

        if (call->raw_init) {
                ret = call->raw_init();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
        }

        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs "
                           "'%s' directory\n", call->name);
                return -1;
        }

        if (call->regfunc)
                entry = trace_create_file("enable", 0644, call->dir, call,
                                          enable);

        if (call->id)
                entry = trace_create_file("id", 0444, call->dir, call, id);

        if (call->define_fields) {
                ret = call->define_fields();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
                entry = trace_create_file("filter", 0644, call->dir, call,
                                          filter);
        }

        /* A trace may not want to export its format */
        if (!call->show_format)
                return 0;

        entry = trace_create_file("format", 0444, call->dir, call, format);

        return 0;
}

#define for_each_event(event, start, end)                       \
        for (event = start;                                     \
             (unsigned long)event < (unsigned long)end;         \
             event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
        struct list_head        list;
        struct module           *mod;
        struct file_operations  id;
        struct file_operations  enable;
        struct file_operations  format;
        struct file_operations  filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;

        /*
         * This is a bit of a PITA. To allow for correct reference
         * counting, modules must "own" their file_operations.
         * To do this, we allocate the file operations that will be
         * used in the event directory.
         */

        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
        if (!file_ops)
                return NULL;

        file_ops->mod = mod;

        file_ops->id = ftrace_event_id_fops;
        file_ops->id.owner = mod;

        file_ops->enable = ftrace_enable_fops;
        file_ops->enable.owner = mod;

        file_ops->filter = ftrace_event_filter_fops;
        file_ops->filter.owner = mod;

        file_ops->format = ftrace_event_format_fops;
        file_ops->format.owner = mod;

        list_add(&file_ops->list, &ftrace_module_file_list);

        return file_ops;
}

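/*
 * Setting .owner to the module makes the VFS take a reference on the
 * module while any of these files are open, so the module cannot be
 * unloaded underneath an open event file.
 */
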
static void trace_module_add_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call *call, *start, *end;
        struct dentry *d_events;

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        if (start == end)
                return;

        d_events = event_trace_events_dir();
        if (!d_events)
                return;

        for_each_event(call, start, end) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;

                /*
                 * This module has events, create file ops for this module
                 * if not already done.
                 */
                if (!file_ops) {
                        file_ops = trace_create_file_ops(mod);
                        if (!file_ops)
                                return;
                }
                call->mod = mod;
                list_add(&call->list, &ftrace_events);
                event_create_dir(call, d_events,
                                 &file_ops->id, &file_ops->enable,
                                 &file_ops->filter, &file_ops->format);
        }
}

static void trace_module_remove_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;
        bool found = false;

        down_write(&trace_event_mutex);
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        found = true;
                        ftrace_event_enable_disable(call, 0);
                        if (call->event)
                                __unregister_ftrace_event(call->event);
                        debugfs_remove_recursive(call->dir);
                        list_del(&call->list);
                        trace_destroy_fields(call);
                        destroy_preds(call);
                }
        }

        /* Now free the file_operations */
        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        break;
        }
        if (&file_ops->list != &ftrace_module_file_list) {
                list_del(&file_ops->list);
                kfree(file_ops);
        }

        /*
         * It is safest to reset the ring buffer if the module being unloaded
         * registered any events.
         */
        if (found)
                tracing_reset_current_online_cpus();
        up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);

        return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

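/*
 * These symbols are emitted by the linker script around the section in
 * which the TRACE_EVENT() macros place their ftrace_event_call instances,
 * so [__start_ftrace_events, __stop_ftrace_events) spans every built-in
 * event.
 */
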
static __init int event_trace_init(void)
{
        struct ftrace_event_call *call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
        int ret;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    (void *)&show_event_seq_ops,
                                    &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_events' entry\n");

        entry = debugfs_create_file("set_event", 0644, d_tracer,
                                    (void *)&show_set_event_seq_ops,
                                    &ftrace_set_event_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_event' entry\n");

        d_events = event_trace_events_dir();
        if (!d_events)
                return 0;

        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
                          &ftrace_show_header_fops);

        trace_create_file("header_event", 0444, d_events,
                          ring_buffer_print_entry_header,
                          &ftrace_show_header_fops);

        trace_create_file("enable", 0644, d_events,
                          NULL, &ftrace_system_enable_fops);

        for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;
                list_add(&call->list, &ftrace_events);
                event_create_dir(call, d_events, &ftrace_event_id_fops,
                                 &ftrace_enable_fops,
                                 &ftrace_event_filter_fops,
                                 &ftrace_event_format_fops);
        }

        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");

        return 0;
}
fs_initcall(event_trace_init);

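/*
 * After event_trace_init() the tracing debugfs directory contains,
 * roughly:
 *
 *      tracing/available_events
 *      tracing/set_event
 *      tracing/events/enable
 *      tracing/events/header_page
 *      tracing/events/header_event
 *      tracing/events/<system>/enable
 *      tracing/events/<system>/<event>/...
 */
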
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
        spin_lock(&test_spinlock);
        spin_lock_irq(&test_spinlock_irq);
        udelay(1);
        spin_unlock_irq(&test_spinlock_irq);
        spin_unlock(&test_spinlock);

        mutex_lock(&test_mutex);
        msleep(1);
        mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
        void *test_malloc;

        test_malloc = kmalloc(1234, GFP_KERNEL);
        if (!test_malloc)
                pr_info("failed to kmalloc\n");

        schedule_on_each_cpu(test_work);

        kfree(test_malloc);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop())
                schedule();

        return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
        struct task_struct *test_thread;

        test_thread = kthread_run(event_test_thread, NULL, "test-events");
        msleep(1);
        kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
        struct ftrace_event_call *call;
        struct event_subsystem *system;
        int ret;

        pr_info("Running tests on trace events:\n");

        list_for_each_entry(call, &ftrace_events, list) {

                /* Only test those that have a regfunc */
                if (!call->regfunc)
                        continue;

                pr_info("Testing event %s: ", call->name);

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (call->enabled) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                ftrace_event_enable_disable(call, 1);
                event_test_stuff();
                ftrace_event_enable_disable(call, 0);

                printk(KERN_CONT "OK\n");
        }

        /* Now test at the sub system level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(system, &event_subsystems, list) {

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error enabling system %s\n",
                                   system->name);
                        continue;
                }

                event_test_stuff();

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret))
                        pr_warning("error disabling system %s\n",
                                   system->name);

                printk(KERN_CONT "OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error enabling all events\n");
                return;
        }

        event_test_stuff();

        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error disabling all events\n");
                return;
        }

        printk(KERN_CONT "OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int resched;
        int cpu;
        int pc;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->parent_ip = parent_ip;

        trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
        atomic_dec(&per_cpu(test_event_disable, cpu));
        ftrace_preempt_enable(resched);
}

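/*
 * The per-cpu test_event_disable counter is a recursion guard: if this
 * callback re-enters on the same CPU (e.g. a traced function is called
 * while writing the trace entry), disabled will be > 1 and the nested
 * call bails out without reserving another event.
 */
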
static struct ftrace_ops trace_ops __initdata =
{
        .func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
        register_ftrace_function(&trace_ops);
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
        event_trace_self_tests();

        event_trace_self_test_with_function();

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */