/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
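
/*
 * For example, SIZEOF_TRACE_KPROBE(2) is the size of a trace_kprobe whose
 * trailing tp.args[] holds two probe_arg slots: the offsetof() accounts for
 * the flexible array at the end of the embedded trace_probe.
 */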

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}
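
/*
 * Module-qualified symbols are written "MOD:SYM" (for instance
 * "ext4:ext4_sync_fs"), which is what the ':' checks above match on.
 */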

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}
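
/*
 * The per-cpu hit counters summed here are what the "kprobe_profile"
 * interface below reports for each event.
 */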

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}
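
/*
 * A sketch of how a symbol_cache backs the @SYM fetch method: the parser
 * allocates it once, and the fetch functions defined below read through
 * sc->addr, which update_symbol_cache() can re-resolve later, e.g.:
 *
 *	struct symbol_cache *sc = alloc_symbol_cache("jiffies", 0);
 *
 *	if (sc && sc->addr)
 *		fetch_memory_u64(regs, (void *)sc->addr, dest);
 */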

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset)); \
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL
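
/*
 * For a "$stackN" argument the parser passes N through the "offset"
 * cookie above; e.g. fetch_stack_u64(regs, (void *)3, dest) reads
 * kernel stack entry 3, i.e. what "$stack3" denotes.
 */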

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,		\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get the string again here, since it can change while we
	 * are probing: the length measured earlier may no longer match.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
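
/*
 * The u32 "data location" word used above packs the fetched length into
 * its high 16 bits and the offset of the string data into its low 16 bits
 * (see make_data_rloc()/get_rloc_len()/get_rloc_offs() in trace_probe.h).
 */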

/* Return the length of the string -- including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
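
/*
 * A ":TYPE" suffix on a fetch argument (for example "%ax:u32" or
 * "+0(%di):string") selects one of the entries above; the table is
 * handed to traceprobe_parse_probe_arg() from create_trace_kprobe().
 */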

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

/*
 * Enable trace_probe:
 * if @file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler for @file.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}

/*
 * Disable trace_probe:
 * if @file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler for @file.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to make sure the probe is really disabled (all running
		 * handlers have finished). This matters not only for the
		 * kfree() below, but also for the caller:
		 * trace_remove_event_call() relies on it when releasing
		 * event_call related objects, which those handlers still
		 * access.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		pr_warn("Could not insert probe at %s+%lu: %d\n",
			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/*
 * Unregister a trace_probe and probe_event: must be called with
 * probe_lock held.
 */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete the old event if one with the same name exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
			tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback: re-register events probing the coming module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on the coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* No need to check busy - this probe should have gone already */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
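	/*
	 * For example (a sketch; the probed symbol and registers are only
	 * illustrative), writing to the "kprobe_events" file:
	 *
	 *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' > kprobe_events
	 *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
	 *   echo '-:myprobe' >> kprobe_events
	 *
	 * ends up here with each token as one argv[] entry, e.g.
	 * argv = { "p:myprobe", "do_sys_open", "dfd=%ax", "filename=%dx" }.
	 */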
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
							tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						is_return, true,
						kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
			trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
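	/*
	 * perf records carry a u32 size header in front of the raw data;
	 * round the total up to a u64 boundary with that header included,
	 * then subtract the header again (the usual perf sizing idiom).
	 */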
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
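	/* Same u32-header alignment idiom as in kprobe_perf_func() above */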
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif