/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
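
/*
 * Illustrative sizing: tp.args is a trailing flexible array, so a probe
 * with two arguments is allocated as SIZEOF_TRACE_KPROBE(2), i.e. the
 * offset of tp.args plus 2 * sizeof(struct probe_arg).
 */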

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}
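
/*
 * e.g. a probe placed on "btrfs:btrfs_sync_file" (illustrative symbol
 * name) is within the module named "btrfs".
 */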

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset)); \
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,		\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
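/*
 * The data-location word packs the string length in its high 16 bits
 * and the buffer offset in its low 16 bits (see make_data_rloc() and
 * get_rloc_len()/get_rloc_offs() in trace_probe.h).
 */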
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of the string, including the terminating NUL byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable
 * the "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable
 * the "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure the probe is disabled (i.e. all running handlers
		 * have finished). This is not only for kfree(); the caller,
		 * trace_remove_event_call(), also relies on it when releasing
		 * event_call related objects, which are accessed in
		 * kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flags */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		pr_warn("Could not insert probe at %s+%lu: %d\n",
			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete the old event if one with the same name exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
			tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback: re-registers probes on a coming module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* No need to check busy - this probe should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
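	/*
	 * Example (illustrative only - the event names and fetch args
	 * below are made up; paths assume tracefs is mounted at
	 * /sys/kernel/debug/tracing):
	 *
	 *   echo 'p:myopen do_sys_open dfd=%di path=+0(%si):string' \
	 *	> /sys/kernel/debug/tracing/kprobe_events
	 *   echo 'r2:myopenret do_sys_open $retval' \
	 *	>> /sys/kernel/debug/tracing/kprobe_events
	 */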
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/*
		 * kretprobe instances are iterated over via a list; the
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	/*
	 * Try to parse an address. If that fails, try to read the
	 * input as a symbol.
	 */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse either an address or a symbol.\n");
			return ret;
		}
		if (offset && is_return &&
		    !kprobe_on_func_entry(NULL, symbol, offset)) {
			pr_info("Given offset is not valid for return probe.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
			       argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe. (%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with another field.\n",
				i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						is_return, true,
						kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
			trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
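
/*
 * A listing line produced above looks like (illustrative):
 *   p:kprobes/myopen do_sys_open dfd=%di path=+0(%si):string
 */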

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}
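
/*
 * A kprobe_profile line shows event name, hit count and missed count,
 * e.g. (illustrative):
 *   myopen                                            12               0
 */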

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
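	/*
	 * Perf raw records carry a u32 size field before the payload;
	 * the rounding above keeps size plus that u32 u64-aligned
	 * (general perf buffer convention, noted here for clarity).
	 */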

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. "noinline" makes sure there
 * isn't an inlined version used by the test method below.
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif