 * Kprobes-based tracing events
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>
#include "trace_output.h"
#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
#define MAX_EVENT_NAME_LEN 64
#define KPROBE_EVENT_SYSTEM "kprobes"
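/*
 * Events registered in this group appear under
 * <debugfs>/tracing/events/kprobes/<EVENT>; the path is illustrative
 * and depends on where debugfs is mounted.
 */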
/* Reserved field names */
#define FIELD_STRING_IP "__probe_ip"
#define FIELD_STRING_NARGS "__probe_nargs"
#define FIELD_STRING_RETIP "__probe_ret_ip"
#define FIELD_STRING_FUNC "__probe_func"
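/*
 * These are the fixed fields that every kprobe/kretprobe event record
 * carries (see kprobe_event_define_fields() and
 * kretprobe_event_define_fields() below); user-supplied argument names
 * must not collide with them, which conflict_field_name() enforces.
 */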
const char *reserved_field_names[] = {
"common_preempt_count",
unsigned long (*func)(struct pt_regs *, void *);
static __kprobes unsigned long call_fetch(struct fetch_func *f,
return f->func(regs, f->data);
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
return regs_get_register(regs, (unsigned int)((unsigned long)offset));
static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
return regs_get_kernel_stack_nth(regs,
(unsigned int)((unsigned long)num));
static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
if (probe_kernel_address(addr, retval))
static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
return regs_return_value(regs);
static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
return kernel_stack_pointer(regs);
/* Memory fetching by symbol */
struct symbol_cache {
static unsigned long update_symbol_cache(struct symbol_cache *sc)
sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
sc->addr += sc->offset;
static void free_symbol_cache(struct symbol_cache *sc)
static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
struct symbol_cache *sc;
if (!sym || strlen(sym) == 0)
sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
sc->symbol = kstrdup(sym, GFP_KERNEL);
update_symbol_cache(sc);
static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
struct symbol_cache *sc = data;
return fetch_memory(regs, (void *)sc->addr);
/* Special indirect memory access interface */
struct indirect_fetch_data {
struct fetch_func orig;
static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
struct indirect_fetch_data *ind = data;
addr = call_fetch(&ind->orig, regs);
return fetch_memory(regs, (void *)addr);
static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
if (data->orig.func == fetch_indirect)
free_indirect_fetch_data(data->orig.data);
else if (data->orig.func == fetch_symbol)
free_symbol_cache(data->orig.data);
 * Kprobe event core functions
struct fetch_func fetch;
/* Flags for trace_probe */
#define TP_FLAG_TRACE 1
#define TP_FLAG_PROFILE 2
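/*
 * Both flags may be set at once: one probe can feed the ftrace ring
 * buffer and perf profiling simultaneously. The underlying k(ret)probe
 * is disabled only once both users are gone (see probe_event_disable()
 * and probe_profile_disable() below).
 */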
struct list_head list;
struct kretprobe rp; /* Use rp.kp for kprobe use */
unsigned int flags; /* For TP_FLAG_* */
const char *symbol; /* symbol name */
struct ftrace_event_call call;
struct trace_event event;
unsigned int nr_args;
struct probe_arg args[];
#define SIZEOF_TRACE_PROBE(n) \
(offsetof(struct trace_probe, args) + \
(sizeof(struct probe_arg) * (n)))
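/*
 * Illustrative sizing: for a probe with two arguments,
 * SIZEOF_TRACE_PROBE(2) == offsetof(struct trace_probe, args)
 * + 2 * sizeof(struct probe_arg), i.e. the fixed header plus the
 * flexible args[] array.
 */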
static __kprobes int probe_is_return(struct trace_probe *tp)
return tp->rp.handler != NULL;
static __kprobes const char *probe_symbol(struct trace_probe *tp)
return tp->symbol ? tp->symbol : "unknown";
static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
if (ff->func == fetch_argument)
ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
else if (ff->func == fetch_register) {
name = regs_query_register_name((unsigned int)((long)ff->data));
ret = snprintf(buf, n, "%%%s", name);
} else if (ff->func == fetch_stack)
ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
else if (ff->func == fetch_memory)
ret = snprintf(buf, n, "@0x%p", ff->data);
else if (ff->func == fetch_symbol) {
struct symbol_cache *sc = ff->data;
ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
ret = snprintf(buf, n, "@%s", sc->symbol);
} else if (ff->func == fetch_retvalue)
ret = snprintf(buf, n, "$retval");
else if (ff->func == fetch_stack_address)
ret = snprintf(buf, n, "$stack");
else if (ff->func == fetch_indirect) {
struct indirect_fetch_data *id = ff->data;
ret = snprintf(buf, n, "%+ld(", id->offset);
ret = probe_arg_string(buf + l, n - l, &id->orig);
ret = snprintf(buf + l, n - l, ")");
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
struct pt_regs *regs);
/* Check that the name is valid for an event/group */
static int check_event_name(const char *name)
if (!isalpha(*name) && *name != '_')
while (*++name != '\0') {
if (!isalpha(*name) && !isdigit(*name) && *name != '_')
 * Allocate a new trace_probe and initialize it (including kprobes).
static struct trace_probe *alloc_trace_probe(const char *group,
int nargs, int is_return)
struct trace_probe *tp;
tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
tp->symbol = kstrdup(symbol, GFP_KERNEL);
tp->rp.kp.symbol_name = tp->symbol;
tp->rp.kp.offset = offs;
tp->rp.kp.addr = addr;
tp->rp.handler = kretprobe_dispatcher;
tp->rp.kp.pre_handler = kprobe_dispatcher;
if (!event || !check_event_name(event)) {
tp->call.name = kstrdup(event, GFP_KERNEL);
if (!group || !check_event_name(group)) {
tp->call.system = kstrdup(group, GFP_KERNEL);
if (!tp->call.system)
INIT_LIST_HEAD(&tp->list);
kfree(tp->call.name);
static void free_probe_arg(struct probe_arg *arg)
if (arg->fetch.func == fetch_symbol)
free_symbol_cache(arg->fetch.data);
else if (arg->fetch.func == fetch_indirect)
free_indirect_fetch_data(arg->fetch.data);
static void free_trace_probe(struct trace_probe *tp)
for (i = 0; i < tp->nr_args; i++)
free_probe_arg(&tp->args[i]);
kfree(tp->call.system);
kfree(tp->call.name);
static struct trace_probe *find_probe_event(const char *event,
struct trace_probe *tp;
list_for_each_entry(tp, &probe_list, list)
if (strcmp(tp->call.name, event) == 0 &&
strcmp(tp->call.system, group) == 0)
/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static void unregister_trace_probe(struct trace_probe *tp)
if (probe_is_return(tp))
unregister_kretprobe(&tp->rp);
unregister_kprobe(&tp->rp.kp);
unregister_probe_event(tp);
/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
struct trace_probe *old_tp;
mutex_lock(&probe_lock);
/* register as an event */
old_tp = find_probe_event(tp->call.name, tp->call.system);
/* delete the old event */
unregister_trace_probe(old_tp);
free_trace_probe(old_tp);
ret = register_probe_event(tp);
pr_warning("Failed to register probe event(%d)\n", ret);
tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
if (probe_is_return(tp))
ret = register_kretprobe(&tp->rp);
ret = register_kprobe(&tp->rp.kp);
pr_warning("Could not insert probe(%d)\n", ret);
if (ret == -EILSEQ) {
pr_warning("Probing address(0x%p) is not an "
"instruction boundary.\n",
unregister_probe_event(tp);
list_add_tail(&tp->list, &probe_list);
mutex_unlock(&probe_lock);
/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
tmp = strchr(symbol, '+');
/* skip the sign because strict_strtoul doesn't accept '+' */
ret = strict_strtoul(tmp + 1, 0, offset);
#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
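/*
 * Sanity bounds for $argN and $stackN: at most PARAM_MAX_ARGS function
 * arguments, and no stack index beyond the number of unsigned longs
 * that fit in one kernel stack.
 */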
static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
if (strcmp(arg, "retval") == 0) {
ff->func = fetch_retvalue;
} else if (strncmp(arg, "stack", 5) == 0) {
if (arg[5] == '\0') {
ff->func = fetch_stack_address;
} else if (isdigit(arg[5])) {
ret = strict_strtoul(arg + 5, 10, &param);
if (ret || param > PARAM_MAX_STACK)
ff->func = fetch_stack;
ff->data = (void *)param;
} else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
ret = strict_strtoul(arg + 3, 10, &param);
if (ret || param > PARAM_MAX_ARGS)
ff->func = fetch_argument;
ff->data = (void *)param;
/* Recursive argument parser */
static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
ret = parse_probe_vars(arg + 1, ff, is_return);
case '%': /* named register */
ret = regs_query_register_offset(arg + 1);
ff->func = fetch_register;
ff->data = (void *)(unsigned long)ret;
case '@': /* memory or symbol */
if (isdigit(arg[1])) {
ret = strict_strtoul(arg + 1, 0, &param);
ff->func = fetch_memory;
ff->data = (void *)param;
ret = split_symbol_offset(arg + 1, &offset);
ff->data = alloc_symbol_cache(arg + 1, offset);
ff->func = fetch_symbol;
case '+': /* indirect memory */
tmp = strchr(arg, '(');
ret = strict_strtol(arg + 1, 0, &offset);
tmp = strrchr(arg, ')');
struct indirect_fetch_data *id;
id = kzalloc(sizeof(struct indirect_fetch_data),
ret = __parse_probe_arg(arg, &id->orig, is_return);
ff->func = fetch_indirect;
ff->data = (void *)id;
/* TODO: support custom handler */
/* String length checking wrapper */
static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
if (strlen(arg) > MAX_ARGSTR_LEN) {
pr_info("Argument is too long: %s\n", arg);
return __parse_probe_arg(arg, ff, is_return);
/* Return 1 if name is reserved or already used by another argument */
static int conflict_field_name(const char *name,
struct probe_arg *args, int narg)
for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
if (strcmp(reserved_field_names[i], name) == 0)
for (i = 0; i < narg; i++)
if (strcmp(args[i].name, name) == 0)
static int create_trace_probe(int argc, char **argv)
 * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
 * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
 * $argN : fetch the Nth function argument (N: 0-)
 * $retval : fetch the return value
 * $stack : fetch the stack address
 * $stackN : fetch the Nth entry of the stack (N: 0-)
 * @ADDR : fetch memory at ADDR (ADDR must be a kernel address)
 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
 * %REG : fetch register REG
 * Indirect memory fetch:
 * +|-offs(ARG) : fetch memory at address ARG +|- offs
 * Aliases for arguments:
 * NAME=FETCHARG : set NAME as an alias of FETCHARG
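 *
 * Illustrative examples (probe names and symbols are examples only):
 *  p:myprobe do_sys_open dfd=$arg0 filename=$arg1 flags=$arg2 mode=$arg3
 *  r:myretprobe do_sys_open $retval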
struct trace_probe *tp;
int is_return = 0, is_delete = 0;
char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
unsigned long offset = 0;
char buf[MAX_EVENT_NAME_LEN];
/* argc must be >= 1 */
if (argv[0][0] == 'p')
else if (argv[0][0] == 'r')
else if (argv[0][0] == '-')
pr_info("Probe definition must start with 'p', 'r' or"
if (argv[0][1] == ':') {
if (strchr(event, '/')) {
event = strchr(group, '/') + 1;
if (strlen(group) == 0) {
pr_info("Group name is not specified\n");
if (strlen(event) == 0) {
pr_info("Event name is not specified\n");
group = KPROBE_EVENT_SYSTEM;
668 pr_info("Delete command needs an event name.\n");
671 tp = find_probe_event(event, group);
673 pr_info("Event %s/%s doesn't exist.\n", group, event);
676 /* delete an event */
677 unregister_trace_probe(tp);
678 free_trace_probe(tp);
683 pr_info("Probe point is not specified.\n");
686 if (isdigit(argv[1][0])) {
688 pr_info("Return probe point must be a symbol.\n");
691 /* an address specified */
692 ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
694 pr_info("Failed to parse address.\n");
698 /* a symbol specified */
700 /* TODO: support .init module functions */
701 ret = split_symbol_offset(symbol, &offset);
703 pr_info("Failed to parse symbol.\n");
706 if (offset && is_return) {
707 pr_info("Return probe must be used without offset.\n");
711 argc -= 2; argv += 2;
/* Make a new event name */
snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
is_return ? 'r' : 'p', symbol, offset);
snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
is_return ? 'r' : 'p', addr);
tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
pr_info("Failed to allocate trace_probe. (%d)\n",
/* parse arguments */
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
/* Parse argument name */
arg = strchr(argv[i], '=');
if (conflict_field_name(argv[i], tp->args, i)) {
pr_info("Argument%d name '%s' conflicts with "
"another field.\n", i, argv[i]);
tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
if (!tp->args[i].name) {
pr_info("Failed to allocate argument%d name '%s'.\n",
/* Parse fetch argument */
ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
pr_info("Parse error at argument%d. (%d)\n", i, ret);
kfree(tp->args[i].name);
ret = register_trace_probe(tp);
free_trace_probe(tp);
static void cleanup_all_probes(void)
struct trace_probe *tp;
mutex_lock(&probe_lock);
/* TODO: Use batch unregistration */
while (!list_empty(&probe_list)) {
tp = list_entry(probe_list.next, struct trace_probe, list);
unregister_trace_probe(tp);
free_trace_probe(tp);
mutex_unlock(&probe_lock);
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
mutex_lock(&probe_lock);
return seq_list_start(&probe_list, *pos);
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
return seq_list_next(v, &probe_list, pos);
static void probes_seq_stop(struct seq_file *m, void *v)
mutex_unlock(&probe_lock);
static int probes_seq_show(struct seq_file *m, void *v)
struct trace_probe *tp = v;
char buf[MAX_ARGSTR_LEN + 1];
seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);
seq_printf(m, " 0x%p", tp->rp.kp.addr);
else if (tp->rp.kp.offset)
seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
seq_printf(m, " %s", probe_symbol(tp));
for (i = 0; i < tp->nr_args; i++) {
ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
pr_warning("Argument%d decoding error (%d).\n", i, ret);
seq_printf(m, " %s=%s", tp->args[i].name, buf);
static const struct seq_operations probes_seq_op = {
.start = probes_seq_start,
.next = probes_seq_next,
.stop = probes_seq_stop,
.show = probes_seq_show
static int probes_open(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
cleanup_all_probes();
return seq_open(file, &probes_seq_op);
static int command_trace_probe(const char *buf)
int argc = 0, ret = 0;
argv = argv_split(GFP_KERNEL, buf, &argc);
ret = create_trace_probe(argc, argv);
#define WRITE_BUFSIZE 128
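/*
 * probes_write() consumes its input line by line, so each command must
 * fit in WRITE_BUFSIZE - 1 characters. Illustrative shell usage (the
 * debugfs mount point may differ):
 *   echo 'p:myprobe do_sys_open' > /sys/kernel/debug/tracing/kprobe_events
 */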
static ssize_t probes_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
while (done < count) {
if (size >= WRITE_BUFSIZE)
size = WRITE_BUFSIZE - 1;
if (copy_from_user(kbuf, buffer + done, size)) {
tmp = strchr(kbuf, '\n');
size = tmp - kbuf + 1;
} else if (done + size < count) {
pr_warning("Line is too long: "
"it should be less than %d characters.", WRITE_BUFSIZE);
/* Remove comments */
tmp = strchr(kbuf, '#');
ret = command_trace_probe(kbuf);
static const struct file_operations kprobe_events_ops = {
.owner = THIS_MODULE,
.release = seq_release,
.write = probes_write,
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
struct trace_probe *tp = v;
seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
static const struct seq_operations profile_seq_op = {
.start = probes_seq_start,
.next = probes_seq_next,
.stop = probes_seq_stop,
.show = probes_profile_seq_show
static int profile_open(struct inode *inode, struct file *file)
return seq_open(file, &profile_seq_op);
static const struct file_operations kprobe_profile_ops = {
.owner = THIS_MODULE,
.open = profile_open,
.release = seq_release,
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
struct kprobe_trace_entry *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
unsigned long irq_flags;
struct ftrace_event_call *call = &tp->call;
local_save_flags(irq_flags);
pc = preempt_count();
size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
entry = ring_buffer_event_data(event);
entry->nargs = tp->nr_args;
entry->ip = (unsigned long)kp->addr;
for (i = 0; i < tp->nr_args; i++)
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
struct pt_regs *regs)
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
struct kretprobe_trace_entry *entry;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
unsigned long irq_flags;
struct ftrace_event_call *call = &tp->call;
local_save_flags(irq_flags);
pc = preempt_count();
size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
entry = ring_buffer_event_data(event);
entry->nargs = tp->nr_args;
entry->func = (unsigned long)tp->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
for (i = 0; i < tp->nr_args; i++)
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
/* Event entry printers */
print_kprobe_event(struct trace_iterator *iter, int flags)
struct kprobe_trace_entry *field;
struct trace_seq *s = &iter->seq;
struct trace_event *event;
struct trace_probe *tp;
field = (struct kprobe_trace_entry *)iter->ent;
event = ftrace_find_event(field->ent.type);
tp = container_of(event, struct trace_probe, event);
if (!trace_seq_printf(s, "%s: (", tp->call.name))
if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
if (!trace_seq_puts(s, ")"))
for (i = 0; i < field->nargs; i++)
if (!trace_seq_printf(s, " %s=%lx",
tp->args[i].name, field->args[i]))
if (!trace_seq_puts(s, "\n"))
return TRACE_TYPE_HANDLED;
return TRACE_TYPE_PARTIAL_LINE;
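/*
 * A formatted kprobe entry looks like this in the trace output
 * (symbol and values are illustrative):
 *   myprobe: (do_sys_open+0x0/0xd6) dfd=3 filename=7fffd1ec4440
 */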
print_kretprobe_event(struct trace_iterator *iter, int flags)
struct kretprobe_trace_entry *field;
struct trace_seq *s = &iter->seq;
struct trace_event *event;
struct trace_probe *tp;
field = (struct kretprobe_trace_entry *)iter->ent;
event = ftrace_find_event(field->ent.type);
tp = container_of(event, struct trace_probe, event);
if (!trace_seq_printf(s, "%s: (", tp->call.name))
if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
if (!trace_seq_puts(s, " <- "))
if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
if (!trace_seq_puts(s, ")"))
for (i = 0; i < field->nargs; i++)
if (!trace_seq_printf(s, " %s=%lx",
tp->args[i].name, field->args[i]))
if (!trace_seq_puts(s, "\n"))
return TRACE_TYPE_HANDLED;
return TRACE_TYPE_PARTIAL_LINE;
static int probe_event_enable(struct ftrace_event_call *call)
struct trace_probe *tp = (struct trace_probe *)call->data;
tp->flags |= TP_FLAG_TRACE;
if (probe_is_return(tp))
return enable_kretprobe(&tp->rp);
return enable_kprobe(&tp->rp.kp);
static void probe_event_disable(struct ftrace_event_call *call)
struct trace_probe *tp = (struct trace_probe *)call->data;
tp->flags &= ~TP_FLAG_TRACE;
if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
if (probe_is_return(tp))
disable_kretprobe(&tp->rp);
disable_kprobe(&tp->rp.kp);
static int probe_event_raw_init(struct ftrace_event_call *event_call)
INIT_LIST_HEAD(&event_call->fields);
#define DEFINE_FIELD(type, item, name, is_signed) \
ret = trace_define_field(event_call, #type, name, \
offsetof(typeof(field), item), \
sizeof(field.item), is_signed, \
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
struct kprobe_trace_entry field;
struct trace_probe *tp = (struct trace_probe *)event_call->data;
DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
/* Set argument names as fields */
for (i = 0; i < tp->nr_args; i++)
DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
struct kretprobe_trace_entry field;
struct trace_probe *tp = (struct trace_probe *)event_call->data;
DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
/* Set argument names as fields */
for (i = 0; i < tp->nr_args; i++)
DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
static int __probe_event_show_format(struct trace_seq *s,
struct trace_probe *tp, const char *fmt,
if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
for (i = 0; i < tp->nr_args; i++)
if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
if (!trace_seq_printf(s, "\", %s", arg))
for (i = 0; i < tp->nr_args; i++)
if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
return trace_seq_puts(s, "\n");
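/*
 * For a kprobe event with a single argument named "dfd", this emits a
 * format line like (illustrative):
 *   print fmt: "(%lx) dfd=%lx", REC->__probe_ip, REC->dfd
 */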
#define SHOW_FIELD(type, item, name) \
ret = trace_seq_printf(s, "\tfield:" #type " %s;\t" \
"offset:%u;\tsize:%u;\tsigned:%d;\n", name,\
(unsigned int)offsetof(typeof(field), item),\
(unsigned int)sizeof(type), \
is_signed_type(type)); \
static int kprobe_event_show_format(struct ftrace_event_call *call,
struct trace_seq *s)
struct kprobe_trace_entry field __attribute__((unused));
struct trace_probe *tp = (struct trace_probe *)call->data;
SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
for (i = 0; i < tp->nr_args; i++)
SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
trace_seq_puts(s, "\n");
return __probe_event_show_format(s, tp, "(%lx)",
"REC->" FIELD_STRING_IP);
static int kretprobe_event_show_format(struct ftrace_event_call *call,
struct trace_seq *s)
struct kretprobe_trace_entry field __attribute__((unused));
struct trace_probe *tp = (struct trace_probe *)call->data;
SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
for (i = 0; i < tp->nr_args; i++)
SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
trace_seq_puts(s, "\n");
return __probe_event_show_format(s, tp, "(%lx <- %lx)",
"REC->" FIELD_STRING_FUNC
", REC->" FIELD_STRING_RETIP);
#ifdef CONFIG_EVENT_PROFILE
/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
struct pt_regs *regs)
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
struct ftrace_event_call *call = &tp->call;
struct kprobe_trace_entry *entry;
struct trace_entry *ent;
int size, __size, i, pc, __cpu;
unsigned long irq_flags;
pc = preempt_count();
__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
"profile buffer not large enough"))
 * Protect the non-NMI buffer;
 * this also protects the RCU read side.
local_irq_save(irq_flags);
rctx = perf_swevent_get_recursion_context();
__cpu = smp_processor_id();
trace_buf = rcu_dereference(perf_trace_buf_nmi);
trace_buf = rcu_dereference(perf_trace_buf);
raw_data = per_cpu_ptr(trace_buf, __cpu);
/* Zero dead bytes from alignment to avoid buffer leak to userspace */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
entry = (struct kprobe_trace_entry *)raw_data;
tracing_generic_entry_update(ent, irq_flags, pc);
ent->type = call->id;
entry->nargs = tp->nr_args;
entry->ip = (unsigned long)kp->addr;
for (i = 0; i < tp->nr_args; i++)
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
perf_tp_event(call->id, entry->ip, 1, entry, size);
perf_swevent_put_recursion_context(rctx);
local_irq_restore(irq_flags);
/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
struct pt_regs *regs)
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
struct ftrace_event_call *call = &tp->call;
struct kretprobe_trace_entry *entry;
struct trace_entry *ent;
int size, __size, i, pc, __cpu;
unsigned long irq_flags;
pc = preempt_count();
__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
"profile buffer not large enough"))
 * Protect the non-NMI buffer;
 * this also protects the RCU read side.
local_irq_save(irq_flags);
rctx = perf_swevent_get_recursion_context();
__cpu = smp_processor_id();
trace_buf = rcu_dereference(perf_trace_buf_nmi);
trace_buf = rcu_dereference(perf_trace_buf);
raw_data = per_cpu_ptr(trace_buf, __cpu);
/* Zero dead bytes from alignment to avoid buffer leak to userspace */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
entry = (struct kretprobe_trace_entry *)raw_data;
tracing_generic_entry_update(ent, irq_flags, pc);
ent->type = call->id;
entry->nargs = tp->nr_args;
entry->func = (unsigned long)tp->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
for (i = 0; i < tp->nr_args; i++)
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
perf_swevent_put_recursion_context(rctx);
local_irq_restore(irq_flags);
static int probe_profile_enable(struct ftrace_event_call *call)
struct trace_probe *tp = (struct trace_probe *)call->data;
tp->flags |= TP_FLAG_PROFILE;
if (probe_is_return(tp))
return enable_kretprobe(&tp->rp);
return enable_kprobe(&tp->rp.kp);
static void probe_profile_disable(struct ftrace_event_call *call)
struct trace_probe *tp = (struct trace_probe *)call->data;
tp->flags &= ~TP_FLAG_PROFILE;
if (!(tp->flags & TP_FLAG_TRACE)) {
if (probe_is_return(tp))
disable_kretprobe(&tp->rp);
disable_kprobe(&tp->rp.kp);
#endif /* CONFIG_EVENT_PROFILE */
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
if (tp->flags & TP_FLAG_TRACE)
kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
if (tp->flags & TP_FLAG_PROFILE)
kprobe_profile_func(kp, regs);
#endif /* CONFIG_EVENT_PROFILE */
return 0; /* We don't tweak the kernel, so just return 0 */
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
if (tp->flags & TP_FLAG_TRACE)
kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
if (tp->flags & TP_FLAG_PROFILE)
kretprobe_profile_func(ri, regs);
#endif /* CONFIG_EVENT_PROFILE */
return 0; /* We don't tweak the kernel, so just return 0 */
static int register_probe_event(struct trace_probe *tp)
struct ftrace_event_call *call = &tp->call;
/* Initialize ftrace_event_call */
if (probe_is_return(tp)) {
tp->event.trace = print_kretprobe_event;
call->raw_init = probe_event_raw_init;
call->show_format = kretprobe_event_show_format;
call->define_fields = kretprobe_event_define_fields;
tp->event.trace = print_kprobe_event;
call->raw_init = probe_event_raw_init;
call->show_format = kprobe_event_show_format;
call->define_fields = kprobe_event_define_fields;
call->event = &tp->event;
call->id = register_ftrace_event(&tp->event);
call->regfunc = probe_event_enable;
call->unregfunc = probe_event_disable;
#ifdef CONFIG_EVENT_PROFILE
call->profile_enable = probe_profile_enable;
call->profile_disable = probe_profile_disable;
ret = trace_add_event_call(call);
pr_info("Failed to register kprobe event: %s\n", call->name);
unregister_ftrace_event(&tp->event);
static void unregister_probe_event(struct trace_probe *tp)
/* tp->event is unregistered in trace_remove_event_call() */
trace_remove_event_call(&tp->call);
/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
struct dentry *d_tracer;
struct dentry *entry;
d_tracer = tracing_init_dentry();
entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
NULL, &kprobe_events_ops);
/* Event list interface */
pr_warning("Could not create debugfs "
"'kprobe_events' entry\n");
/* Profile interface */
entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
NULL, &kprobe_profile_ops);
pr_warning("Could not create debugfs "
"'kprobe_profile' entry\n");
fs_initcall(init_kprobe_trace);
#ifdef CONFIG_FTRACE_STARTUP_TEST
static int kprobe_trace_selftest_target(int a1, int a2, int a3,
int a4, int a5, int a6)
return a1 + a2 + a3 + a4 + a5 + a6;
static __init int kprobe_trace_self_tests_init(void)
int (*target)(int, int, int, int, int, int);
target = kprobe_trace_selftest_target;
pr_info("Testing kprobe tracing: ");
ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
"$arg1 $arg2 $arg3 $arg4 $stack $stack0");
if (WARN_ON_ONCE(ret))
pr_warning("error on probing function entry\n");
ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
if (WARN_ON_ONCE(ret))
pr_warning("error on probing function return\n");
ret = target(1, 2, 3, 4, 5, 6);
cleanup_all_probes();
late_initcall(kprobe_trace_self_tests_init);