/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

int ftrace_enabled;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function to the stub and in essence stops
 * tracing. There may be a lag before all CPUs stop calling the
 * previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should never be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -ENOENT;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

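/*
 * Worked example with hypothetical sizes: on a 64-bit machine with 4K
 * pages, the packed header above is 12 bytes (8-byte next pointer plus
 * 4-byte index); if struct dyn_ftrace were 32 bytes, ENTRIES_PER_PAGE
 * would come to (4096 - 12) / 32 = 127 records per page.
 */
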
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* todo: disable tracing altogether on this warning */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void notrace
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;

	if (!ftrace_enabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))

static void notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}
	}
}

static void notrace ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}

static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

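/*
 * Run a code-modification command with all CPUs held in stop_machine,
 * so that no CPU can be executing (or about to execute) an instruction
 * while it is being rewritten.
 */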
static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void notrace ftrace_startup(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is nonzero if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is nonzero if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUs are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_update_tot_cnt = 0;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate the first page */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -ENOMEM;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static void notrace ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static ssize_t notrace
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

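/*
 * ftrace_match() below understands a limited glob syntax. An
 * illustrative (hypothetical) pattern for each match type:
 *
 *   "schedule"  - MATCH_FULL: the symbol name matches exactly
 *   "sched_*"   - MATCH_FRONT_ONLY: the name starts with "sched_"
 *   "*_lock"    - MATCH_END_ONLY: the name ends with "_lock"
 *   "*spin*"    - MATCH_MIDDLE_ONLY: the name contains "spin"
 */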
static void notrace
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static ssize_t notrace
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* only skip leading white space when not continuing a token */
	if (!(iter->flags & FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and @reset is set, all functions will be enabled for tracing.
 */
notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}

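/*
 * Illustrative use of ftrace_set_filter() from a tracer's init path
 * ("my_ops" is a hypothetical ftrace_ops): trace only scheduler
 * functions, clearing any previously set filters first:
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *	register_ftrace_function(&my_ops);
 */
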
static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};

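/*
 * Illustrative use of these files from userspace, assuming debugfs is
 * mounted at /sys/kernel/debug and the tracing directory lives there:
 *
 *	# list every function that may be traced
 *	cat available_filter_functions
 *	# trace only the scheduler
 *	echo 'sched_*' > set_ftrace_filter
 */
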
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (!ftraced_task)
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init notrace ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		return addr;

	ret = ftrace_dyn_table_alloc();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return PTR_ERR(p);

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

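/*
 * Minimal registration sketch (illustrative, so compiled out). The
 * callback, and everything it calls, must be marked notrace or the
 * tracer would trace itself and recurse. "my_trace_func", "my_ops"
 * and "my_tracer_init" are hypothetical names.
 */
#if 0
static notrace void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* called on entry to every traced function */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	/* pair with unregister_ftrace_function(&my_ops) on teardown */
	return register_ftrace_function(&my_ops);
}
#endif
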
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}