ftrace: add filter select functions to trace
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

int ftrace_enabled;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and the time
 * all CPUs stop calling the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
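
/*
 * Illustrative sketch (not in the original file): the smp_wmb() in
 * __register_ftrace_function() pairs with the read_barrier_depends()
 * calls in ftrace_list_func().  Reduced to its essentials:
 *
 *   writer (register)              reader (ftrace_list_func)
 *   -----------------              -------------------------
 *   ops->next = ftrace_list;       op = ftrace_list;
 *   smp_wmb();                     read_barrier_depends();
 *   ftrace_list = ops;             op->func(ip, parent_ip);
 *                                  op = op->next;
 *
 * A reader that observes the new list head is therefore guaranteed to
 * observe a valid ops->next behind it, so the list walk cannot step
 * through a half-initialized entry.
 */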

static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        int                     index;
        struct dyn_ftrace       records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
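
/*
 * Worked example (sizes are illustrative; the real values depend on
 * the architecture and config): with PAGE_SIZE == 4096, a 16-byte
 * struct ftrace_page header and a 16-byte struct dyn_ftrace,
 * ENTRIES_PER_PAGE would come out to (4096 - 16) / 16 = 255 records
 * per page.
 */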

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head(&node->node, &ftrace_hash[key]);
}

static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void notrace
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;

        if (!ftrace_enabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /* We simply need to protect against recursion */
        __get_cpu_var(ftrace_shutdown_disable_cpu)++;
        if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        /*
         * There's a slight race that the ftraced will update the
         * hash and reset here. If it is already converted, skip it.
         */
        if (ftrace_ip_converted(ip))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        __get_cpu_var(ftrace_shutdown_disable_cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))

static void notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip;
        int failed;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                unsigned long fl;
                /*
                 * If filtering is on, there are four cases for
                 * (FTRACE_FL_FILTER, FTRACE_FL_ENABLED):
                 *
                 *   filtered and enabled:    do nothing
                 *   filtered, not enabled:   enable it
                 *   unfiltered, not enabled: do nothing
                 *   unfiltered and enabled:  disable it
                 */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == 0))
                        return;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl == FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable)
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        failed = ftrace_modify_code(ip, old, new);
        if (failed)
                rec->flags |= FTRACE_FL_FAILED;
}

static void notrace ftrace_replace_code(int enable)
{
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int i;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        __ftrace_replace_code(rec, old, new, enable);
                }
        }
}

static notrace void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed)
                rec->flags |= FTRACE_FL_FAILED;
}

static int notrace __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void notrace ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void notrace ftrace_startup(void)
{
        int command = 0;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
        int command = 0;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_head head;
        struct hlist_node *t;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                if (hlist_empty(&ftrace_hash[i]))
                        continue;

                head = ftrace_hash[i];
                INIT_HLIST_HEAD(&ftrace_hash[i]);

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry(p, t, &head, node) {
                        ftrace_code_disable(p);
                        ftrace_update_cnt++;
                }
        }

        stop = now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        ftrace_enabled = save_ftrace_enabled;

        return 0;
}

static void notrace ftrace_update_code(void)
{
        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int notrace ftraced(void *ignore)
{
        unsigned long usecs;

        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                /* check once a second */
                schedule_timeout(HZ);

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
                        ftrace_record_suspend++;
                        ftrace_update_code();
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                WARN_ON_ONCE(1);
                        }
                        ftraced_trigger = 0;
                        ftrace_record_suspend--;
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FAILED) ||
                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};
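
/*
 * Note on the seq_file contract (summary, not part of the original
 * file): each read() calls ->start() once, then alternates ->show()
 * and ->next() until the buffer fills or records run out, and ends
 * with ->stop().  t_start() replays t_next() from the first record
 * whenever *pos disagrees with the cached iter->pos, which is what
 * makes the iterator restartable across successive reads.
 */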

static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = iter;
        } else
                kfree(iter);

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);
        return 0;
}

static void notrace ftrace_filter_reset(void)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_filter_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset();

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = FTRACE_ITER_FILTER;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

static ssize_t notrace
ftrace_filter_read(struct file *file, char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};
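
/*
 * Examples of how ftrace_match() below maps a filter string onto
 * these types (illustrative):
 *
 *   "sys_open"   MATCH_FULL          exact strcmp()
 *   "sys_*"      MATCH_FRONT_ONLY    prefix memcmp()
 *   "*_read"     MATCH_END_ONLY      suffix via strstr()
 *   "*lock*"     MATCH_MIDDLE_ONLY   substring via strstr()
 */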

static void notrace
ftrace_match(unsigned char *buff, int len)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static ssize_t notrace
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        /* cnt is a size_t and cannot be negative */
        if (!cnt)
                return 0;

        mutex_lock(&ftrace_filter_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_filter_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_filter_lock);
        return 0;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_filter_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_filter_lseek,
        .release = ftrace_filter_release,
};

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);
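
/*
 * Illustrative usage from the shell (assuming debugfs is mounted at
 * /debug):
 *
 *   cat /debug/tracing/available_filter_functions
 *   echo 'sys_*' > /debug/tracing/set_ftrace_filter
 *   echo > /debug/tracing/set_ftrace_filter
 *
 * The last write clears the filter: opening set_ftrace_filter for
 * write without O_APPEND calls ftrace_filter_reset() first, and the
 * lone newline is then skipped as white space by ftrace_filter_write().
 */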

static int __init notrace ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;
        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                return addr;

        ret = ftrace_dyn_table_alloc();
        if (ret)
                return ret;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p))
                return -1;

        last_ftrace_enabled = ftrace_enabled = 1;

        return 0;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()         do { } while (0)
# define ftrace_shutdown()        do { } while (0)
# define ftrace_startup_sysctl()  do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
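
/*
 * Minimal usage sketch (my_trace_func and my_ops are hypothetical
 * names, not part of this file).  The callback, and everything it
 * calls, must be marked notrace to avoid recursion:
 *
 *   static notrace void
 *   my_trace_func(unsigned long ip, unsigned long parent_ip)
 *   {
 *           (record ip/parent_ip somewhere reentrancy-safe)
 *   }
 *
 *   static struct ftrace_ops my_ops __read_mostly = {
 *           .func = my_trace_func,
 *   };
 *
 *   register_ftrace_function(&my_ops);
 *   ...
 *   unregister_ftrace_function(&my_ops);
 */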

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}