/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;
        struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
};

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
static void ftrace_global_list_func(unsigned long ip,
                                    unsigned long parent_ip)
{
        struct ftrace_ops *op;

        if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
                return;

        trace_recursion_set(TRACE_GLOBAL_BIT);
        op = rcu_dereference_raw(ftrace_global_list); /*see above*/
        while (op != &ftrace_list_end) {
                op->func(ip, parent_ip);
                op = rcu_dereference_raw(op->next); /*see above*/
        }
        trace_recursion_clear(TRACE_GLOBAL_BIT);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
        if (!test_tsk_trace_trace(current))
                return;

        ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and the moment
 * the trampoline actually stops calling into the tracer.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function_delay = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif

static void control_ops_disable_all(struct ftrace_ops *ops)
{
        int cpu;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
        int __percpu *disabled;

        disabled = alloc_percpu(int);
        if (!disabled)
                return -ENOMEM;

        ops->disabled = disabled;
        control_ops_disable_all(ops);
        return 0;
}

static void control_ops_free(struct ftrace_ops *ops)
{
        free_percpu(ops->disabled);
}

static void update_global_ops(void)
{
        ftrace_func_t func;

        /*
         * If there's only one function registered, then call that
         * function directly. Otherwise, we need to iterate over the
         * registered callers.
         */
        if (ftrace_global_list == &ftrace_list_end ||
            ftrace_global_list->next == &ftrace_list_end)
                func = ftrace_global_list->func;
        else
                func = ftrace_global_list_func;

        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        }

        global_ops.func = func;
}

static void update_ftrace_function(void)
{
        ftrace_func_t func;

        update_global_ops();

        /*
         * If we are at the end of the list and this ops is
         * not dynamic, then have the mcount trampoline call
         * the function directly
         */
        if (ftrace_ops_list == &ftrace_list_end ||
            (ftrace_ops_list->next == &ftrace_list_end &&
             !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
                func = ftrace_ops_list->func;
        else
                func = ftrace_ops_list_func;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
#else
#ifdef CONFIG_DYNAMIC_FTRACE
        /* do not update till all functions have been modified */
        __ftrace_trace_function_delay = func;
#else
        __ftrace_trace_function = func;
#endif
        ftrace_trace_function = ftrace_test_stop_func;
#endif
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        ops->next = *list;
        /*
         * We are entering ops into the list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the list.
         */
        rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (*list == ops && ops->next == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

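/*
 * The global and control ops live on their own sub-lists; a single
 * "main" ops (global_ops or control_ops) representing the whole
 * sub-list is hooked into ftrace_ops_list. These helpers keep the
 * sub-list and the main list in sync.
 */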
static void add_ftrace_list_ops(struct ftrace_ops **list,
                                struct ftrace_ops *main_ops,
                                struct ftrace_ops *ops)
{
        int first = *list == &ftrace_list_end;
        add_ftrace_ops(list, ops);
        if (first)
                add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
                                  struct ftrace_ops *main_ops,
                                  struct ftrace_ops *ops)
{
        int ret = remove_ftrace_ops(list, ops);
        if (!ret && *list == &ftrace_list_end)
                ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
        return ret;
}

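/*
 * Validate and enqueue @ops. GLOBAL and CONTROL are mutually exclusive;
 * an ops living outside core kernel data (e.g. in a module) is marked
 * DYNAMIC so unregistration can synchronize before it is freed.
 */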
static int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (ftrace_disabled)
                return -ENODEV;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

        /* We don't support both control and global flags set. */
        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
                return -EINVAL;

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
                ops->flags |= FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (ftrace_disabled)
                return -ENODEV;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                ret = remove_ftrace_list_ops(&ftrace_global_list,
                                             &global_ops, ops);
                if (!ret)
                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
                if (!ret) {
                        /*
                         * The ftrace_ops is now removed from the list,
                         * so there'll be no new users. We must ensure
                         * all current users are done before we free
                         * the control data.
                         */
                        synchronize_sched();
                        control_ops_free(ops);
                }
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();

        /*
         * Dynamic ops may be freed, we must make sure that all
         * callers are done before leaving this function.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
                synchronize_sched();

        return 0;
}

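/*
 * A minimal user of this interface might look like the following
 * (illustrative sketch only; names are made up):
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		... inspect ip and parent_ip ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */
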
static void ftrace_update_pid_func(void)
{
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

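/*
 * Advance to the next allocated profile record, crossing page
 * boundaries as needed; used as the ->stat_next callback.
 */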
static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;
        else
                return 0;
}
#else
/* when not using function graph, compare against the hit counter */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;
        else
                return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "  Function                               "
                   "Hit    Time            Avg             s^2\n"
                      "  --------                               "
                   "---    ----            ---             ---\n");
#else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "    ");
        avg = rec->time;
        do_div(avg, rec->counter);

        /* Sample standard deviation (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                stddev = rec->time_squared - rec->counter * avg * avg;
                /*
                 * Divide only 1000 for ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
                do_div(stddev, (rec->counter - 1) * 1000);
        }

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}

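/* Zero every record and empty the hash so profiling starts fresh. */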
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. With past experience
         * we have around 20K functions. That should be more than enough.
         * It is highly unlikely we will execute every function in
         * the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 0; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        free_page((unsigned long)stat->pages);
        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        if (!ftrace_profile_bits) {
                size--;

                for (; size; size >>= 1)
                        ftrace_profile_bits++;
        }

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_online_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        struct hlist_node *n;
        unsigned long key;

        key = hash_long(ip, ftrace_profile_bits);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu(rec, n, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, ftrace_profile_bits);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

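/* Bump the hit counter for @ip; disables IRQs around the per-cpu state. */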
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0);
        return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zeroed, ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

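/*
 * Write handler for the "function_profile_enabled" debugfs file:
 * "1" initializes and registers the profiler, "0" tears it down.
 */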
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler calls stop_machine
                         * so this acts like a synchronize_sched.
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The files created are permanent, if something happens
                         * we still do not free memory.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = debugfs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct rcu_head         rcu;
};

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
        struct rcu_head         rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
};

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     size;
};

static struct ftrace_page *ftrace_new_pgs;

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
        return !hash || !hash->count;
}

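/* Find the entry for @ip in @hash; returns NULL if absent or hash empty. */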
static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;
        struct hlist_node *n;

        if (ftrace_hash_empty(hash))
                return NULL;

        if (hash->size_bits > 0)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
{
        struct hlist_head *hhd;
        unsigned long key;

        if (hash->size_bits)
                key = hash_long(entry->ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->ip = ip;
        __add_hash_entry(hash, entry);

        return 0;
}

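/*
 * free_hash_entry() unlinks and frees an entry; remove_hash_entry()
 * only unlinks it, for when the entry is being moved to another hash.
 */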
static void
free_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tp, *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        if (!hash->count)
                return;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        ftrace_hash_clear(hash);
        kfree(hash->buckets);
        kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
        struct ftrace_hash *hash;

        hash = container_of(rcu, struct ftrace_hash, rcu);
        free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
        struct ftrace_hash *hash;
        int size;

        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
        if (!hash)
                return NULL;

        size = 1 << size_bits;
        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

        if (!hash->buckets) {
                kfree(hash);
                return NULL;
        }

        hash->size_bits = size_bits;

        return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        struct hlist_node *tp;
        int size;
        int ret;
        int i;

        new_hash = alloc_ftrace_hash(size_bits);
        if (!new_hash)
                return NULL;

        /* Empty hash? */
        if (ftrace_hash_empty(hash))
                return new_hash;

        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
                }
        }

        FTRACE_WARN_ON(new_hash->count != hash->count);

        return new_hash;

 free_hash:
        free_ftrace_hash(new_hash);
        return NULL;
}

static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

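/*
 * Replace the hash pointed to by @dst with the contents of @src,
 * sizing the new hash to roughly half the number of entries and
 * updating the dyn_ftrace record counts around the switch.
 */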
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
{
        struct ftrace_func_entry *entry;
        struct hlist_node *tp, *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
        unsigned long key;
        int size = src->count;
        int bits = 0;
        int ret;
        int i;

        /*
         * Remove the current set, update the hash and add
         * them back.
         */
        ftrace_hash_rec_disable(ops, enable);

        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
                /* still need to update the function records */
                ret = 0;
                goto out;
        }

        /*
         * Make the hash size about 1/2 the # found
         */
        for (size /= 2; size; size >>= 1)
                bits++;

        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;

        ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
                goto out;

        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
                        if (bits > 0)
                                key = hash_long(entry->ip, bits);
                        else
                                key = 0;
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
        }

        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);

        ret = 0;
 out:
        /*
         * Enable regardless of ret:
         *  On success, we enable the new hash.
         *  On failure, we re-enable the original hash.
         */
        ftrace_hash_rec_enable(ops, enable);

        return ret;
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
        struct ftrace_hash *filter_hash;
        struct ftrace_hash *notrace_hash;
        int ret;

        filter_hash = rcu_dereference_raw(ops->filter_hash);
        notrace_hash = rcu_dereference_raw(ops->notrace_hash);

        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
            (ftrace_hash_empty(notrace_hash) ||
             !ftrace_lookup_ip(notrace_hash, ip)))
                ret = 1;
        else
                ret = 0;

        return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }

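/*
 * For example, a typical walk over all records looks like this
 * (illustrative sketch; some_ip is made up):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == some_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 */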

static int ftrace_cmp_recs(const void *a, const void *b)
{
        const struct dyn_ftrace *reca = a;
        const struct dyn_ftrace *recb = b;

        if (reca->ip > recb->ip)
                return 1;
        if (reca->ip < recb->ip)
                return -1;
        return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns 1 if @ip is a pointer to an ftrace location.
 * That is, the instruction that is either a NOP or a call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_location(unsigned long ip)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;

        key.ip = ip;

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
                        return 1;
        }

        return 0;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
{
        struct ftrace_hash *hash;
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int count = 0;
        int all = 0;

        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return;

        /*
         * In the filter_hash case:
         *   If the count is zero, we update all records.
         *   Otherwise we just update the items in the hash.
         *
         * In the notrace_hash case:
         *   We enable the update in the hash.
         *   As disabling notrace means enabling the tracing,
         *   and enabling notrace means disabling, the inc variable
         *   gets inverted.
         */
        if (filter_hash) {
                hash = ops->filter_hash;
                other_hash = ops->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
                hash = ops->notrace_hash;
                other_hash = ops->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
                 */
                if (ftrace_hash_empty(hash))
                        return;
        }

        do_for_each_ftrace_rec(pg, rec) {
                int in_other_hash = 0;
                int in_hash = 0;
                int match = 0;

                if (all) {
                        /*
                         * Only the filter_hash affects all records.
                         * Update if the record is not in the notrace hash.
                         */
                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
                                match = 1;
                } else {
                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

                        /*
                         * If filter_hash is set, we want to match all
                         * functions that are in the hash but not in the
                         * other hash.
                         *
                         * If filter_hash is not set, then we are
                         * decrementing. That means we match anything
                         * that is in the hash and also in the other hash.
                         */
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
                        else if (!filter_hash && in_hash &&
                                 (in_other_hash || ftrace_hash_empty(other_hash)))
                                match = 1;
                }
                if (!match)
                        continue;

                if (inc) {
                        rec->flags++;
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
                                return;
                } else {
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
                                return;
                        rec->flags--;
                }
                count++;
                /* Shortcut, if we handled all records, we are done. */
                if (!all && count == hash->count)
                        return;
        } while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
                                    int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
                                   int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        if (ftrace_pages->index == ftrace_pages->size) {
                /* We should have allocated enough */
                if (WARN_ON(!ftrace_pages->next))
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;

        return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, unsigned long ip)
{
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
                print_ip_sym(ip);
                break;
        case -EINVAL:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
                printk(KERN_CONT "\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
                print_ip_sym(ip);
                break;
        default:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
}

/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        do_for_each_ftrace_rec(pg, rec) {
                if (rec->ip <= (unsigned long)end &&
                    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
                        return 1;
        } while_for_each_ftrace_rec();
        return 0;
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
        unsigned long flag = 0UL;

        /*
         * If we are updating calls:
         *
         *   If the record has a ref count, then we need to enable it
         *   because someone is using it.
         *
         *   Otherwise we make sure it's disabled.
         *
         * If we are disabling calls, then disable all records that
         * are enabled.
         */
        if (enable && (rec->flags & ~FTRACE_FL_MASK))
                flag = FTRACE_FL_ENABLED;

        /* If the state of this record hasn't changed, then do nothing */
        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
                return FTRACE_UPDATE_IGNORE;

        if (flag) {
                if (update)
                        rec->flags |= FTRACE_FL_ENABLED;
                return FTRACE_UPDATE_MAKE_CALL;
        }

        if (update)
                rec->flags &= ~FTRACE_FL_ENABLED;

        return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record - set a record to be tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
        return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record - check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
        return ftrace_check_record(rec, enable, 0);
}

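/*
 * Apply the state decided by ftrace_update_record() to the code at
 * @rec->ip: patch in either a call to the trampoline or a NOP.
 */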
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ftrace_addr = (unsigned long)FTRACE_ADDR;

        ret = ftrace_update_record(rec, enable);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MAKE_CALL:
                return ftrace_make_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                return ftrace_make_nop(NULL, rec, ftrace_addr);
        }

        return -1; /* unknown ftrace bug */
}

1694 static void ftrace_replace_code(int update)
1695 {
1696         struct dyn_ftrace *rec;
1697         struct ftrace_page *pg;
1698         int failed;
1699
1700         if (unlikely(ftrace_disabled))
1701                 return;
1702
1703         do_for_each_ftrace_rec(pg, rec) {
1704                 failed = __ftrace_replace_code(rec, update);
1705                 if (failed) {
1706                         ftrace_bug(failed, rec->ip);
1707                         /* Stop processing */
1708                         return;
1709                 }
1710         } while_for_each_ftrace_rec();
1711 }
1712
1713 struct ftrace_rec_iter {
1714         struct ftrace_page      *pg;
1715         int                     index;
1716 };
1717
1718 /**
1719  * ftrace_rec_iter_start, start up iterating over traced functions
1720  *
1721  * Returns an iterator handle that is used to iterate over all
1722  * the records that represent address locations where functions
1723  * are traced.
1724  *
1725  * May return NULL if no records are available.
1726  */
1727 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1728 {
1729         /*
1730          * We only use a single iterator.
1731          * Protected by the ftrace_lock mutex.
1732          */
1733         static struct ftrace_rec_iter ftrace_rec_iter;
1734         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1735
1736         iter->pg = ftrace_pages_start;
1737         iter->index = 0;
1738
1739         /* Could have empty pages */
1740         while (iter->pg && !iter->pg->index)
1741                 iter->pg = iter->pg->next;
1742
1743         if (!iter->pg)
1744                 return NULL;
1745
1746         return iter;
1747 }
1748
1749 /**
1750  * ftrace_rec_iter_next, get the next record to process.
1751  * @iter: The handle to the iterator.
1752  *
1753  * Returns the next iterator after the given iterator @iter.
1754  */
1755 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1756 {
1757         iter->index++;
1758
1759         if (iter->index >= iter->pg->index) {
1760                 iter->pg = iter->pg->next;
1761                 iter->index = 0;
1762
1763                 /* Could have empty pages */
1764                 while (iter->pg && !iter->pg->index)
1765                         iter->pg = iter->pg->next;
1766         }
1767
1768         if (!iter->pg)
1769                 return NULL;
1770
1771         return iter;
1772 }
1773
1774 /**
1775  * ftrace_rec_iter_record, get the record at the iterator location
1776  * @iter: The current iterator location
1777  *
1778  * Returns the record that the current @iter is at.
1779  */
1780 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1781 {
1782         return &iter->pg->records[iter->index];
1783 }
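
Together, the three iterator functions give arch code a straightforward walk over every record. A minimal sketch, assuming the caller already holds the ftrace_lock mutex as noted above (process_record() is a hypothetical per-record callback):

	static void walk_all_records(void (*process_record)(struct dyn_ftrace *))
	{
		struct ftrace_rec_iter *iter;

		/* ftrace_lock must be held: the iterator is a single static object */
		for (iter = ftrace_rec_iter_start(); iter;
		     iter = ftrace_rec_iter_next(iter))
			process_record(ftrace_rec_iter_record(iter));
	}
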
1784
1785 static int
1786 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1787 {
1788         unsigned long ip;
1789         int ret;
1790
1791         ip = rec->ip;
1792
1793         if (unlikely(ftrace_disabled))
1794                 return 0;
1795
1796         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1797         if (ret) {
1798                 ftrace_bug(ret, ip);
1799                 return 0;
1800         }
1801         return 1;
1802 }
1803
1804 /*
1805  * archs can override this function if they must do something
1806  * before the modifying code is performed.
1807  */
1808 int __weak ftrace_arch_code_modify_prepare(void)
1809 {
1810         return 0;
1811 }
1812
1813 /*
1814  * archs can override this function if they must do something
1815  * after the modifying code is performed.
1816  */
1817 int __weak ftrace_arch_code_modify_post_process(void)
1818 {
1819         return 0;
1820 }
1821
1822 static int __ftrace_modify_code(void *data)
1823 {
1824         int *command = data;
1825
1826         if (*command & FTRACE_UPDATE_CALLS)
1827                 ftrace_replace_code(1);
1828         else if (*command & FTRACE_DISABLE_CALLS)
1829                 ftrace_replace_code(0);
1830
1831         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1832                 ftrace_update_ftrace_func(ftrace_trace_function);
1833
1834         if (*command & FTRACE_START_FUNC_RET)
1835                 ftrace_enable_ftrace_graph_caller();
1836         else if (*command & FTRACE_STOP_FUNC_RET)
1837                 ftrace_disable_ftrace_graph_caller();
1838
1839         return 0;
1840 }
1841
1842 /**
1843  * ftrace_run_stop_machine, go back to the stop machine method
1844  * @command: The command to tell ftrace what to do
1845  *
1846  * If an arch needs to fall back to the stop machine method,
1847  * it can call this function.
1848  */
1849 void ftrace_run_stop_machine(int command)
1850 {
1851         stop_machine(__ftrace_modify_code, &command, NULL);
1852 }
1853
1854 /**
1855  * arch_ftrace_update_code, modify the code to trace or not trace
1856  * @command: The command that needs to be done
1857  *
1858  * Archs can override this function if they do not need to
1859  * run stop_machine() to modify code.
1860  */
1861 void __weak arch_ftrace_update_code(int command)
1862 {
1863         ftrace_run_stop_machine(command);
1864 }
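
An arch that has a cheaper way to serialize code patching can override the weak hook and fall back only when needed. A hypothetical sketch (arch_text_poke_safe() and arch_patch_calls_live() are invented placeholders, not real kernel APIs):

	void arch_ftrace_update_code(int command)
	{
		if (!arch_text_poke_safe()) {		/* invented capability check */
			ftrace_run_stop_machine(command);
			return;
		}

		arch_patch_calls_live(command);		/* invented live-patching path */
	}
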
1865
1866 static void ftrace_run_update_code(int command)
1867 {
1868         int ret;
1869
1870         ret = ftrace_arch_code_modify_prepare();
1871         FTRACE_WARN_ON(ret);
1872         if (ret)
1873                 return;
1874         /*
1875          * Do not call function tracer while we update the code.
1876          * We are in stop machine.
1877          */
1878         function_trace_stop++;
1879
1880         /*
1881          * By default we use stop_machine() to modify the code.
1882          * But archs can do whatever they want as long as it
1883          * is safe. stop_machine() is the safest, but also
1884          * produces the most overhead.
1885          */
1886         arch_ftrace_update_code(command);
1887
1888 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1889         /*
1890          * For archs that call ftrace_test_stop_func(), we must
1891          * wait till after we update all the function callers
1892          * before we update the callback. This keeps different
1893          * ops that record different functions from corrupting
1894          * each other.
1895          */
1896         __ftrace_trace_function = __ftrace_trace_function_delay;
1897 #endif
1898         function_trace_stop--;
1899
1900         ret = ftrace_arch_code_modify_post_process();
1901         FTRACE_WARN_ON(ret);
1902 }
1903
1904 static ftrace_func_t saved_ftrace_func;
1905 static int ftrace_start_up;
1906 static int global_start_up;
1907
1908 static void ftrace_startup_enable(int command)
1909 {
1910         if (saved_ftrace_func != ftrace_trace_function) {
1911                 saved_ftrace_func = ftrace_trace_function;
1912                 command |= FTRACE_UPDATE_TRACE_FUNC;
1913         }
1914
1915         if (!command || !ftrace_enabled)
1916                 return;
1917
1918         ftrace_run_update_code(command);
1919 }
1920
1921 static int ftrace_startup(struct ftrace_ops *ops, int command)
1922 {
1923         bool hash_enable = true;
1924
1925         if (unlikely(ftrace_disabled))
1926                 return -ENODEV;
1927
1928         ftrace_start_up++;
1929         command |= FTRACE_UPDATE_CALLS;
1930
1931         /* ops marked global share the filter hashes */
1932         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1933                 ops = &global_ops;
1934                 /* Don't update hash if global is already set */
1935                 if (global_start_up)
1936                         hash_enable = false;
1937                 global_start_up++;
1938         }
1939
1940         ops->flags |= FTRACE_OPS_FL_ENABLED;
1941         if (hash_enable)
1942                 ftrace_hash_rec_enable(ops, 1);
1943
1944         ftrace_startup_enable(command);
1945
1946         return 0;
1947 }
1948
1949 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1950 {
1951         bool hash_disable = true;
1952
1953         if (unlikely(ftrace_disabled))
1954                 return;
1955
1956         ftrace_start_up--;
1957         /*
1958          * Just warn in case of an imbalance; no need to kill ftrace, it's
1959          * not critical, but the ftrace_call callers may never be nopped
1960          * again after further ftrace uses.
1961          */
1962         WARN_ON_ONCE(ftrace_start_up < 0);
1963
1964         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1965                 ops = &global_ops;
1966                 global_start_up--;
1967                 WARN_ON_ONCE(global_start_up < 0);
1968                 /* Don't update hash if global still has users */
1969                 if (global_start_up) {
1970                         WARN_ON_ONCE(!ftrace_start_up);
1971                         hash_disable = false;
1972                 }
1973         }
1974
1975         if (hash_disable)
1976                 ftrace_hash_rec_disable(ops, 1);
1977
1978         if (ops != &global_ops || !global_start_up)
1979                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1980
1981         command |= FTRACE_UPDATE_CALLS;
1982
1983         if (saved_ftrace_func != ftrace_trace_function) {
1984                 saved_ftrace_func = ftrace_trace_function;
1985                 command |= FTRACE_UPDATE_TRACE_FUNC;
1986         }
1987
1988         if (!command || !ftrace_enabled)
1989                 return;
1990
1991         ftrace_run_update_code(command);
1992 }
1993
1994 static void ftrace_startup_sysctl(void)
1995 {
1996         if (unlikely(ftrace_disabled))
1997                 return;
1998
1999         /* Force update next time */
2000         saved_ftrace_func = NULL;
2001         /* ftrace_start_up is true if we want ftrace running */
2002         if (ftrace_start_up)
2003                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2004 }
2005
2006 static void ftrace_shutdown_sysctl(void)
2007 {
2008         if (unlikely(ftrace_disabled))
2009                 return;
2010
2011         /* ftrace_start_up is true if ftrace is running */
2012         if (ftrace_start_up)
2013                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2014 }
2015
2016 static cycle_t          ftrace_update_time;
2017 static unsigned long    ftrace_update_cnt;
2018 unsigned long           ftrace_update_tot_cnt;
2019
2020 static int ops_traces_mod(struct ftrace_ops *ops)
2021 {
2022         struct ftrace_hash *hash;
2023
2024         hash = ops->filter_hash;
2025         return ftrace_hash_empty(hash);
2026 }
2027
2028 static int ftrace_update_code(struct module *mod)
2029 {
2030         struct ftrace_page *pg;
2031         struct dyn_ftrace *p;
2032         cycle_t start, stop;
2033         unsigned long ref = 0;
2034         int i;
2035
2036         /*
2037          * When adding a module, we need to check if tracers are
2038          * currently enabled and if they are set to trace all functions.
2039          * If they are, we need to enable the module functions as well
2040          * as update the reference counts for those function records.
2041          */
2042         if (mod) {
2043                 struct ftrace_ops *ops;
2044
2045                 for (ops = ftrace_ops_list;
2046                      ops != &ftrace_list_end; ops = ops->next) {
2047                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2048                             ops_traces_mod(ops))
2049                                 ref++;
2050                 }
2051         }
2052
2053         start = ftrace_now(raw_smp_processor_id());
2054         ftrace_update_cnt = 0;
2055
2056         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2057
2058                 for (i = 0; i < pg->index; i++) {
2059                         /* If something went wrong, bail without enabling anything */
2060                         if (unlikely(ftrace_disabled))
2061                                 return -1;
2062
2063                         p = &pg->records[i];
2064                         p->flags = ref;
2065
2066                         /*
2067                          * Do the initial record conversion from mcount jump
2068                          * to the NOP instructions.
2069                          */
2070                         if (!ftrace_code_disable(mod, p))
2071                                 break;
2072
2073                         ftrace_update_cnt++;
2074
2075                         /*
2076                          * If the tracing is enabled, go ahead and enable the record.
2077                          *
2078          * The reason not to enable the record immediately is the
2079          * inherent check of ftrace_make_nop/ftrace_make_call for
2080          * correct previous instructions.  Doing the NOP conversion
2081          * first puts the module into the correct state, thus
2082          * passing the ftrace_make_call check.
2083                          */
2084                         if (ftrace_start_up && ref) {
2085                                 int failed = __ftrace_replace_code(p, 1);
2086                                 if (failed)
2087                                         ftrace_bug(failed, p->ip);
2088                         }
2089                 }
2090         }
2091
2092         ftrace_new_pgs = NULL;
2093
2094         stop = ftrace_now(raw_smp_processor_id());
2095         ftrace_update_time = stop - start;
2096         ftrace_update_tot_cnt += ftrace_update_cnt;
2097
2098         return 0;
2099 }
2100
2101 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2102 {
2103         int order;
2104         int cnt;
2105
2106         if (WARN_ON(!count))
2107                 return -EINVAL;
2108
2109         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2110
2111         /*
2112          * We want to fill as much as possible; no more than one
2113          * page's worth of entries may be left unused.
2114          */
2115         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2116                 order--;
2117
2118  again:
2119         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2120
2121         if (!pg->records) {
2122                 /* if we can't allocate this size, try something smaller */
2123                 if (!order)
2124                         return -ENOMEM;
2125                 order >>= 1;
2126                 goto again;
2127         }
2128
2129         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2130         pg->size = cnt;
2131
2132         if (cnt > count)
2133                 cnt = count;
2134
2135         return cnt;
2136 }
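
To make the sizing concrete, suppose (hypothetically) PAGE_SIZE is 4096 and ENTRY_SIZE is 32, giving ENTRIES_PER_PAGE = 128. For count = 300, DIV_ROUND_UP(300, 128) = 3 and get_count_order(3) = 2, so the loop starts at order 2 (16384 bytes, 512 entries); since 512 >= 300 + 128 the order drops to 1 (256 entries), where the condition fails. The order-1 block holds only 256 of the 300 records, so 256 is returned and ftrace_allocate_pages() below allocates a further page for the remaining 44.
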
2137
2138 static struct ftrace_page *
2139 ftrace_allocate_pages(unsigned long num_to_init)
2140 {
2141         struct ftrace_page *start_pg;
2142         struct ftrace_page *pg;
2143         int order;
2144         int cnt;
2145
2146         if (!num_to_init)
2147                 return NULL;
2148
2149         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2150         if (!pg)
2151                 return NULL;
2152
2153         /*
2154          * Try to allocate as much as possible in one contiguous
2155          * location that fills in all of the space. We want to
2156          * waste as little space as possible.
2157          */
2158         for (;;) {
2159                 cnt = ftrace_allocate_records(pg, num_to_init);
2160                 if (cnt < 0)
2161                         goto free_pages;
2162
2163                 num_to_init -= cnt;
2164                 if (!num_to_init)
2165                         break;
2166
2167                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2168                 if (!pg->next)
2169                         goto free_pages;
2170
2171                 pg = pg->next;
2172         }
2173
2174         return start_pg;
2175
2176  free_pages:
2177         while (start_pg) {
2178                 pg = start_pg;
2179                 start_pg = pg->next;
2180                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2181                 free_pages((unsigned long)pg->records, order);
2182                 kfree(pg);
2183         }
2184         pr_info("ftrace: FAILED to allocate memory for functions\n");
2185         return NULL;
2186 }
2187
2188 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2189 {
2190         int cnt;
2191
2192         if (!num_to_init) {
2193                 pr_info("ftrace: No functions to be traced?\n");
2194                 return -1;
2195         }
2196
2197         cnt = num_to_init / ENTRIES_PER_PAGE;
2198         pr_info("ftrace: allocating %ld entries in %d pages\n",
2199                 num_to_init, cnt + 1);
2200
2201         return 0;
2202 }
2203
2204 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2205
2206 struct ftrace_iterator {
2207         loff_t                          pos;
2208         loff_t                          func_pos;
2209         struct ftrace_page              *pg;
2210         struct dyn_ftrace               *func;
2211         struct ftrace_func_probe        *probe;
2212         struct trace_parser             parser;
2213         struct ftrace_hash              *hash;
2214         struct ftrace_ops               *ops;
2215         int                             hidx;
2216         int                             idx;
2217         unsigned                        flags;
2218 };
2219
2220 static void *
2221 t_hash_next(struct seq_file *m, loff_t *pos)
2222 {
2223         struct ftrace_iterator *iter = m->private;
2224         struct hlist_node *hnd = NULL;
2225         struct hlist_head *hhd;
2226
2227         (*pos)++;
2228         iter->pos = *pos;
2229
2230         if (iter->probe)
2231                 hnd = &iter->probe->node;
2232  retry:
2233         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2234                 return NULL;
2235
2236         hhd = &ftrace_func_hash[iter->hidx];
2237
2238         if (hlist_empty(hhd)) {
2239                 iter->hidx++;
2240                 hnd = NULL;
2241                 goto retry;
2242         }
2243
2244         if (!hnd)
2245                 hnd = hhd->first;
2246         else {
2247                 hnd = hnd->next;
2248                 if (!hnd) {
2249                         iter->hidx++;
2250                         goto retry;
2251                 }
2252         }
2253
2254         if (WARN_ON_ONCE(!hnd))
2255                 return NULL;
2256
2257         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2258
2259         return iter;
2260 }
2261
2262 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2263 {
2264         struct ftrace_iterator *iter = m->private;
2265         void *p = NULL;
2266         loff_t l;
2267
2268         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2269                 return NULL;
2270
2271         if (iter->func_pos > *pos)
2272                 return NULL;
2273
2274         iter->hidx = 0;
2275         for (l = 0; l <= (*pos - iter->func_pos); ) {
2276                 p = t_hash_next(m, &l);
2277                 if (!p)
2278                         break;
2279         }
2280         if (!p)
2281                 return NULL;
2282
2283         /* Only set this if we have an item */
2284         iter->flags |= FTRACE_ITER_HASH;
2285
2286         return iter;
2287 }
2288
2289 static int
2290 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2291 {
2292         struct ftrace_func_probe *rec;
2293
2294         rec = iter->probe;
2295         if (WARN_ON_ONCE(!rec))
2296                 return -EIO;
2297
2298         if (rec->ops->print)
2299                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2300
2301         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2302
2303         if (rec->data)
2304                 seq_printf(m, ":%p", rec->data);
2305         seq_putc(m, '\n');
2306
2307         return 0;
2308 }
2309
2310 static void *
2311 t_next(struct seq_file *m, void *v, loff_t *pos)
2312 {
2313         struct ftrace_iterator *iter = m->private;
2314         struct ftrace_ops *ops = iter->ops;
2315         struct dyn_ftrace *rec = NULL;
2316
2317         if (unlikely(ftrace_disabled))
2318                 return NULL;
2319
2320         if (iter->flags & FTRACE_ITER_HASH)
2321                 return t_hash_next(m, pos);
2322
2323         (*pos)++;
2324         iter->pos = iter->func_pos = *pos;
2325
2326         if (iter->flags & FTRACE_ITER_PRINTALL)
2327                 return t_hash_start(m, pos);
2328
2329  retry:
2330         if (iter->idx >= iter->pg->index) {
2331                 if (iter->pg->next) {
2332                         iter->pg = iter->pg->next;
2333                         iter->idx = 0;
2334                         goto retry;
2335                 }
2336         } else {
2337                 rec = &iter->pg->records[iter->idx++];
2338                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2339                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2340
2341                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2342                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2343
2344                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2345                      !(rec->flags & ~FTRACE_FL_MASK))) {
2346
2347                         rec = NULL;
2348                         goto retry;
2349                 }
2350         }
2351
2352         if (!rec)
2353                 return t_hash_start(m, pos);
2354
2355         iter->func = rec;
2356
2357         return iter;
2358 }
2359
2360 static void reset_iter_read(struct ftrace_iterator *iter)
2361 {
2362         iter->pos = 0;
2363         iter->func_pos = 0;
2364         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2365 }
2366
2367 static void *t_start(struct seq_file *m, loff_t *pos)
2368 {
2369         struct ftrace_iterator *iter = m->private;
2370         struct ftrace_ops *ops = iter->ops;
2371         void *p = NULL;
2372         loff_t l;
2373
2374         mutex_lock(&ftrace_lock);
2375
2376         if (unlikely(ftrace_disabled))
2377                 return NULL;
2378
2379         /*
2380          * If an lseek was done, then reset and start from beginning.
2381          */
2382         if (*pos < iter->pos)
2383                 reset_iter_read(iter);
2384
2385         /*
2386          * For set_ftrace_filter reading, if we have the filter
2387          * off, we can short cut and just print out that all
2388          * functions are enabled.
2389          */
2390         if (iter->flags & FTRACE_ITER_FILTER &&
2391             ftrace_hash_empty(ops->filter_hash)) {
2392                 if (*pos > 0)
2393                         return t_hash_start(m, pos);
2394                 iter->flags |= FTRACE_ITER_PRINTALL;
2395                 /* reset in case of seek/pread */
2396                 iter->flags &= ~FTRACE_ITER_HASH;
2397                 return iter;
2398         }
2399
2400         if (iter->flags & FTRACE_ITER_HASH)
2401                 return t_hash_start(m, pos);
2402
2403         /*
2404          * Unfortunately, we need to restart at ftrace_pages_start
2405          * every time we let go of the ftrace_lock mutex. This is because
2406          * those pointers can change without the lock.
2407          */
2408         iter->pg = ftrace_pages_start;
2409         iter->idx = 0;
2410         for (l = 0; l <= *pos; ) {
2411                 p = t_next(m, p, &l);
2412                 if (!p)
2413                         break;
2414         }
2415
2416         if (!p)
2417                 return t_hash_start(m, pos);
2418
2419         return iter;
2420 }
2421
2422 static void t_stop(struct seq_file *m, void *p)
2423 {
2424         mutex_unlock(&ftrace_lock);
2425 }
2426
2427 static int t_show(struct seq_file *m, void *v)
2428 {
2429         struct ftrace_iterator *iter = m->private;
2430         struct dyn_ftrace *rec;
2431
2432         if (iter->flags & FTRACE_ITER_HASH)
2433                 return t_hash_show(m, iter);
2434
2435         if (iter->flags & FTRACE_ITER_PRINTALL) {
2436                 seq_printf(m, "#### all functions enabled ####\n");
2437                 return 0;
2438         }
2439
2440         rec = iter->func;
2441
2442         if (!rec)
2443                 return 0;
2444
2445         seq_printf(m, "%ps", (void *)rec->ip);
2446         if (iter->flags & FTRACE_ITER_ENABLED)
2447                 seq_printf(m, " (%ld)",
2448                            rec->flags & ~FTRACE_FL_MASK);
2449         seq_printf(m, "\n");
2450
2451         return 0;
2452 }
2453
2454 static const struct seq_operations show_ftrace_seq_ops = {
2455         .start = t_start,
2456         .next = t_next,
2457         .stop = t_stop,
2458         .show = t_show,
2459 };
2460
2461 static int
2462 ftrace_avail_open(struct inode *inode, struct file *file)
2463 {
2464         struct ftrace_iterator *iter;
2465         int ret;
2466
2467         if (unlikely(ftrace_disabled))
2468                 return -ENODEV;
2469
2470         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2471         if (!iter)
2472                 return -ENOMEM;
2473
2474         iter->pg = ftrace_pages_start;
2475         iter->ops = &global_ops;
2476
2477         ret = seq_open(file, &show_ftrace_seq_ops);
2478         if (!ret) {
2479                 struct seq_file *m = file->private_data;
2480
2481                 m->private = iter;
2482         } else {
2483                 kfree(iter);
2484         }
2485
2486         return ret;
2487 }
2488
2489 static int
2490 ftrace_enabled_open(struct inode *inode, struct file *file)
2491 {
2492         struct ftrace_iterator *iter;
2493         int ret;
2494
2495         if (unlikely(ftrace_disabled))
2496                 return -ENODEV;
2497
2498         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2499         if (!iter)
2500                 return -ENOMEM;
2501
2502         iter->pg = ftrace_pages_start;
2503         iter->flags = FTRACE_ITER_ENABLED;
2504         iter->ops = &global_ops;
2505
2506         ret = seq_open(file, &show_ftrace_seq_ops);
2507         if (!ret) {
2508                 struct seq_file *m = file->private_data;
2509
2510                 m->private = iter;
2511         } else {
2512                 kfree(iter);
2513         }
2514
2515         return ret;
2516 }
2517
2518 static void ftrace_filter_reset(struct ftrace_hash *hash)
2519 {
2520         mutex_lock(&ftrace_lock);
2521         ftrace_hash_clear(hash);
2522         mutex_unlock(&ftrace_lock);
2523 }
2524
2525 /**
2526  * ftrace_regex_open - initialize function tracer filter files
2527  * @ops: The ftrace_ops that hold the hash filters
2528  * @flag: The type of filter to process
2529  * @inode: The inode, usually passed in to your open routine
2530  * @file: The file, usually passed in to your open routine
2531  *
2532  * ftrace_regex_open() initializes the filter files for the
2533  * @ops. Depending on @flag it may process the filter hash or
2534  * the notrace hash of @ops. With this called from the open
2535  * routine, you can use ftrace_filter_write() for the write
2536  * routine if @flag has FTRACE_ITER_FILTER set, or
2537  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2538  * ftrace_regex_lseek() should be used as the lseek routine, and
2539  * release must call ftrace_regex_release().
2540  */
2541 int
2542 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2543                   struct inode *inode, struct file *file)
2544 {
2545         struct ftrace_iterator *iter;
2546         struct ftrace_hash *hash;
2547         int ret = 0;
2548
2549         if (unlikely(ftrace_disabled))
2550                 return -ENODEV;
2551
2552         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2553         if (!iter)
2554                 return -ENOMEM;
2555
2556         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2557                 kfree(iter);
2558                 return -ENOMEM;
2559         }
2560
2561         if (flag & FTRACE_ITER_NOTRACE)
2562                 hash = ops->notrace_hash;
2563         else
2564                 hash = ops->filter_hash;
2565
2566         iter->ops = ops;
2567         iter->flags = flag;
2568
2569         if (file->f_mode & FMODE_WRITE) {
2570                 mutex_lock(&ftrace_lock);
2571                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2572                 mutex_unlock(&ftrace_lock);
2573
2574                 if (!iter->hash) {
2575                         trace_parser_put(&iter->parser);
2576                         kfree(iter);
2577                         return -ENOMEM;
2578                 }
2579         }
2580
2581         mutex_lock(&ftrace_regex_lock);
2582
2583         if ((file->f_mode & FMODE_WRITE) &&
2584             (file->f_flags & O_TRUNC))
2585                 ftrace_filter_reset(iter->hash);
2586
2587         if (file->f_mode & FMODE_READ) {
2588                 iter->pg = ftrace_pages_start;
2589
2590                 ret = seq_open(file, &show_ftrace_seq_ops);
2591                 if (!ret) {
2592                         struct seq_file *m = file->private_data;
2593                         m->private = iter;
2594                 } else {
2595                         /* Failed */
2596                         free_ftrace_hash(iter->hash);
2597                         trace_parser_put(&iter->parser);
2598                         kfree(iter);
2599                 }
2600         } else
2601                 file->private_data = iter;
2602         mutex_unlock(&ftrace_regex_lock);
2603
2604         return ret;
2605 }
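
A tracer that wants its own filter file only needs to wire these helpers into a file_operations, mirroring the global set_ftrace_filter file defined below. A minimal sketch (my_ops is a hypothetical ftrace_ops owned by the caller):

	static int my_filter_open(struct inode *inode, struct file *file)
	{
		/* For a notrace file, pass FTRACE_ITER_NOTRACE here
		 * and use ftrace_notrace_write() as the write routine. */
		return ftrace_regex_open(&my_ops, FTRACE_ITER_FILTER,
					 inode, file);
	}

	static const struct file_operations my_filter_fops = {
		.open		= my_filter_open,
		.read		= seq_read,
		.write		= ftrace_filter_write,
		.llseek		= ftrace_regex_lseek,
		.release	= ftrace_regex_release,
	};
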
2606
2607 static int
2608 ftrace_filter_open(struct inode *inode, struct file *file)
2609 {
2610         return ftrace_regex_open(&global_ops,
2611                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2612                         inode, file);
2613 }
2614
2615 static int
2616 ftrace_notrace_open(struct inode *inode, struct file *file)
2617 {
2618         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2619                                  inode, file);
2620 }
2621
2622 loff_t
2623 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2624 {
2625         loff_t ret;
2626
2627         if (file->f_mode & FMODE_READ)
2628                 ret = seq_lseek(file, offset, origin);
2629         else
2630                 file->f_pos = ret = 1;
2631
2632         return ret;
2633 }
2634
2635 static int ftrace_match(char *str, char *regex, int len, int type)
2636 {
2637         int matched = 0;
2638         int slen;
2639
2640         switch (type) {
2641         case MATCH_FULL:
2642                 if (strcmp(str, regex) == 0)
2643                         matched = 1;
2644                 break;
2645         case MATCH_FRONT_ONLY:
2646                 if (strncmp(str, regex, len) == 0)
2647                         matched = 1;
2648                 break;
2649         case MATCH_MIDDLE_ONLY:
2650                 if (strstr(str, regex))
2651                         matched = 1;
2652                 break;
2653         case MATCH_END_ONLY:
2654                 slen = strlen(str);
2655                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2656                         matched = 1;
2657                 break;
2658         }
2659
2660         return matched;
2661 }
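
The four MATCH_* types correspond to the glob forms filter_parse_regex() produces: "func" is MATCH_FULL, "func*" is MATCH_FRONT_ONLY, "*func*" is MATCH_MIDDLE_ONLY and "*func" is MATCH_END_ONLY. For example (hypothetical inputs, with len as filter_parse_regex() would report it):

	/* "sched*" parses to ("sched", len 5, MATCH_FRONT_ONLY) */
	ftrace_match("schedule", "sched", 5, MATCH_FRONT_ONLY);	/* returns 1 */

	/* "*_lock" parses to ("_lock", len 5, MATCH_END_ONLY) */
	ftrace_match("spin_lock", "_lock", 5, MATCH_END_ONLY);	/* returns 1 */
	ftrace_match("lock_stat", "_lock", 5, MATCH_END_ONLY);	/* returns 0 */
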
2662
2663 static int
2664 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2665 {
2666         struct ftrace_func_entry *entry;
2667         int ret = 0;
2668
2669         entry = ftrace_lookup_ip(hash, rec->ip);
2670         if (not) {
2671                 /* Do nothing if it doesn't exist */
2672                 if (!entry)
2673                         return 0;
2674
2675                 free_hash_entry(hash, entry);
2676         } else {
2677                 /* Do nothing if it exists */
2678                 if (entry)
2679                         return 0;
2680
2681                 ret = add_hash_entry(hash, rec->ip);
2682         }
2683         return ret;
2684 }
2685
2686 static int
2687 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2688                     char *regex, int len, int type)
2689 {
2690         char str[KSYM_SYMBOL_LEN];
2691         char *modname;
2692
2693         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2694
2695         if (mod) {
2696                 /* module lookup requires matching the module */
2697                 if (!modname || strcmp(modname, mod))
2698                         return 0;
2699
2700                 /* blank search means to match all funcs in the mod */
2701                 if (!len)
2702                         return 1;
2703         }
2704
2705         return ftrace_match(str, regex, len, type);
2706 }
2707
2708 static int
2709 match_records(struct ftrace_hash *hash, char *buff,
2710               int len, char *mod, int not)
2711 {
2712         unsigned search_len = 0;
2713         struct ftrace_page *pg;
2714         struct dyn_ftrace *rec;
2715         int type = MATCH_FULL;
2716         char *search = buff;
2717         int found = 0;
2718         int ret;
2719
2720         if (len) {
2721                 type = filter_parse_regex(buff, len, &search, &not);
2722                 search_len = strlen(search);
2723         }
2724
2725         mutex_lock(&ftrace_lock);
2726
2727         if (unlikely(ftrace_disabled))
2728                 goto out_unlock;
2729
2730         do_for_each_ftrace_rec(pg, rec) {
2731                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2732                         ret = enter_record(hash, rec, not);
2733                         if (ret < 0) {
2734                                 found = ret;
2735                                 goto out_unlock;
2736                         }
2737                         found = 1;
2738                 }
2739         } while_for_each_ftrace_rec();
2740  out_unlock:
2741         mutex_unlock(&ftrace_lock);
2742
2743         return found;
2744 }
2745
2746 static int
2747 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2748 {
2749         return match_records(hash, buff, len, NULL, 0);
2750 }
2751
2752 static int
2753 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2754 {
2755         int not = 0;
2756
2757         /* blank or '*' mean the same */
2758         if (strcmp(buff, "*") == 0)
2759                 buff[0] = 0;
2760
2761         /* handle the case of 'dont filter this module' */
2762         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2763                 buff[0] = 0;
2764                 not = 1;
2765         }
2766
2767         return match_records(hash, buff, strlen(buff), mod, not);
2768 }
2769
2770 /*
2771  * We register the module command as a template to show others how
2772  * to register a command as well.
2773  */
2774
2775 static int
2776 ftrace_mod_callback(struct ftrace_hash *hash,
2777                     char *func, char *cmd, char *param, int enable)
2778 {
2779         char *mod;
2780         int ret = -EINVAL;
2781
2782         /*
2783          * cmd == 'mod' because we only registered this func
2784          * for the 'mod' ftrace_func_command.
2785          * But if you register one func with multiple commands,
2786          * you can tell which command was used by the cmd
2787          * parameter.
2788          */
2789
2790         /* we must have a module name */
2791         if (!param)
2792                 return ret;
2793
2794         mod = strsep(&param, ":");
2795         if (!strlen(mod))
2796                 return ret;
2797
2798         ret = ftrace_match_module_records(hash, func, mod);
2799         if (!ret)
2800                 ret = -EINVAL;
2801         if (ret < 0)
2802                 return ret;
2803
2804         return 0;
2805 }
2806
2807 static struct ftrace_func_command ftrace_mod_cmd = {
2808         .name                   = "mod",
2809         .func                   = ftrace_mod_callback,
2810 };
2811
2812 static int __init ftrace_mod_cmd_init(void)
2813 {
2814         return register_ftrace_command(&ftrace_mod_cmd);
2815 }
2816 device_initcall(ftrace_mod_cmd_init);
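
Following that template, a new command is just a callback plus a registration call. The mod command above is driven from set_ftrace_filter with, e.g., echo ':mod:ext4' > set_ftrace_filter. A hypothetical extra command might look like this (the "dummy" name and its no-op body are invented for illustration):

	static int ftrace_dummy_callback(struct ftrace_hash *hash, char *func,
					 char *cmd, char *param, int enable)
	{
		/* "<func>:dummy:<param>" lands here; match records into
		 * @hash the same way ftrace_mod_callback() does. */
		return 0;
	}

	static struct ftrace_func_command ftrace_dummy_cmd = {
		.name	= "dummy",
		.func	= ftrace_dummy_callback,
	};

	static int __init ftrace_dummy_cmd_init(void)
	{
		return register_ftrace_command(&ftrace_dummy_cmd);
	}
	device_initcall(ftrace_dummy_cmd_init);
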
2817
2818 static void
2819 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2820 {
2821         struct ftrace_func_probe *entry;
2822         struct hlist_head *hhd;
2823         struct hlist_node *n;
2824         unsigned long key;
2825
2826         key = hash_long(ip, FTRACE_HASH_BITS);
2827
2828         hhd = &ftrace_func_hash[key];
2829
2830         if (hlist_empty(hhd))
2831                 return;
2832
2833         /*
2834          * Disable preemption for these calls to prevent an RCU grace
2835          * period from elapsing. This syncs the hash iteration with the
2836          * freeing of items on the hash. rcu_read_lock is too dangerous here.
2837          */
2838         preempt_disable_notrace();
2839         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2840                 if (entry->ip == ip)
2841                         entry->ops->func(ip, parent_ip, &entry->data);
2842         }
2843         preempt_enable_notrace();
2844 }
2845
2846 static struct ftrace_ops trace_probe_ops __read_mostly =
2847 {
2848         .func           = function_trace_probe_call,
2849 };
2850
2851 static int ftrace_probe_registered;
2852
2853 static void __enable_ftrace_function_probe(void)
2854 {
2855         int ret;
2856         int i;
2857
2858         if (ftrace_probe_registered)
2859                 return;
2860
2861         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2862                 struct hlist_head *hhd = &ftrace_func_hash[i];
2863                 if (hhd->first)
2864                         break;
2865         }
2866         /* Nothing registered? */
2867         if (i == FTRACE_FUNC_HASHSIZE)
2868                 return;
2869
2870         ret = __register_ftrace_function(&trace_probe_ops);
2871         if (!ret)
2872                 ret = ftrace_startup(&trace_probe_ops, 0);
2873
2874         ftrace_probe_registered = 1;
2875 }
2876
2877 static void __disable_ftrace_function_probe(void)
2878 {
2879         int ret;
2880         int i;
2881
2882         if (!ftrace_probe_registered)
2883                 return;
2884
2885         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2886                 struct hlist_head *hhd = &ftrace_func_hash[i];
2887                 if (hhd->first)
2888                         return;
2889         }
2890
2891         /* no more funcs left */
2892         ret = __unregister_ftrace_function(&trace_probe_ops);
2893         if (!ret)
2894                 ftrace_shutdown(&trace_probe_ops, 0);
2895
2896         ftrace_probe_registered = 0;
2897 }
2898
2899
2900 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2901 {
2902         struct ftrace_func_probe *entry =
2903                 container_of(rhp, struct ftrace_func_probe, rcu);
2904
2905         if (entry->ops->free)
2906                 entry->ops->free(&entry->data);
2907         kfree(entry);
2908 }
2909
2910
2911 int
2912 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2913                               void *data)
2914 {
2915         struct ftrace_func_probe *entry;
2916         struct ftrace_page *pg;
2917         struct dyn_ftrace *rec;
2918         int type, len, not;
2919         unsigned long key;
2920         int count = 0;
2921         char *search;
2922
2923         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2924         len = strlen(search);
2925
2926         /* we do not support '!' for function probes */
2927         if (WARN_ON(not))
2928                 return -EINVAL;
2929
2930         mutex_lock(&ftrace_lock);
2931
2932         if (unlikely(ftrace_disabled))
2933                 goto out_unlock;
2934
2935         do_for_each_ftrace_rec(pg, rec) {
2936
2937                 if (!ftrace_match_record(rec, NULL, search, len, type))
2938                         continue;
2939
2940                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2941                 if (!entry) {
2942                         /* If we did not process any, then return error */
2943                         if (!count)
2944                                 count = -ENOMEM;
2945                         goto out_unlock;
2946                 }
2947
2948                 count++;
2949
2950                 entry->data = data;
2951
2952                 /*
2953                  * The caller might want to do something special
2954                  * for each function we find. We call the callback
2955                  * to give the caller an opportunity to do so.
2956                  */
2957                 if (ops->callback) {
2958                         if (ops->callback(rec->ip, &entry->data) < 0) {
2959                                 /* caller does not like this func */
2960                                 kfree(entry);
2961                                 continue;
2962                         }
2963                 }
2964
2965                 entry->ops = ops;
2966                 entry->ip = rec->ip;
2967
2968                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2969                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2970
2971         } while_for_each_ftrace_rec();
2972         __enable_ftrace_function_probe();
2973
2974  out_unlock:
2975         mutex_unlock(&ftrace_lock);
2976
2977         return count;
2978 }
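
A typical caller supplies a ftrace_probe_ops and a glob. A minimal sketch (my_probe_func and the "sched*" pattern are hypothetical):

	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
				  void **data)
	{
		/* called via function_trace_probe_call() on every hit */
	}

	static struct ftrace_probe_ops my_probe_ops = {
		.func = my_probe_func,
	};

	static int __init my_probe_init(void)
	{
		/* returns the number of functions matched, or -errno */
		int ret = register_ftrace_function_probe("sched*",
							 &my_probe_ops, NULL);
		return ret < 0 ? ret : 0;
	}
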
2979
2980 enum {
2981         PROBE_TEST_FUNC         = 1,
2982         PROBE_TEST_DATA         = 2
2983 };
2984
2985 static void
2986 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2987                                   void *data, int flags)
2988 {
2989         struct ftrace_func_probe *entry;
2990         struct hlist_node *n, *tmp;
2991         char str[KSYM_SYMBOL_LEN];
2992         int type = MATCH_FULL;
2993         int i, len = 0;
2994         char *search;
2995
2996         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2997                 glob = NULL;
2998         else if (glob) {
2999                 int not;
3000
3001                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3002                 len = strlen(search);
3003
3004                 /* we do not support '!' for function probes */
3005                 if (WARN_ON(not))
3006                         return;
3007         }
3008
3009         mutex_lock(&ftrace_lock);
3010         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3011                 struct hlist_head *hhd = &ftrace_func_hash[i];
3012
3013                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
3014
3015                         /* break up if statements for readability */
3016                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3017                                 continue;
3018
3019                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3020                                 continue;
3021
3022                         /* do this last, since it is the most expensive */
3023                         if (glob) {
3024                                 kallsyms_lookup(entry->ip, NULL, NULL,
3025                                                 NULL, str);
3026                                 if (!ftrace_match(str, glob, len, type))
3027                                         continue;
3028                         }
3029
3030                         hlist_del(&entry->node);
3031                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
3032                 }
3033         }
3034         __disable_ftrace_function_probe();
3035         mutex_unlock(&ftrace_lock);
3036 }
3037
3038 void
3039 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3040                                 void *data)
3041 {
3042         __unregister_ftrace_function_probe(glob, ops, data,
3043                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3044 }
3045
3046 void
3047 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3048 {
3049         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3050 }
3051
3052 void unregister_ftrace_function_probe_all(char *glob)
3053 {
3054         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3055 }
3056
3057 static LIST_HEAD(ftrace_commands);
3058 static DEFINE_MUTEX(ftrace_cmd_mutex);
3059
3060 int register_ftrace_command(struct ftrace_func_command *cmd)
3061 {
3062         struct ftrace_func_command *p;
3063         int ret = 0;
3064
3065         mutex_lock(&ftrace_cmd_mutex);
3066         list_for_each_entry(p, &ftrace_commands, list) {
3067                 if (strcmp(cmd->name, p->name) == 0) {
3068                         ret = -EBUSY;
3069                         goto out_unlock;
3070                 }
3071         }
3072         list_add(&cmd->list, &ftrace_commands);
3073  out_unlock:
3074         mutex_unlock(&ftrace_cmd_mutex);
3075
3076         return ret;
3077 }
3078
3079 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3080 {
3081         struct ftrace_func_command *p, *n;
3082         int ret = -ENODEV;
3083
3084         mutex_lock(&ftrace_cmd_mutex);
3085         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3086                 if (strcmp(cmd->name, p->name) == 0) {
3087                         ret = 0;
3088                         list_del_init(&p->list);
3089                         goto out_unlock;
3090                 }
3091         }
3092  out_unlock:
3093         mutex_unlock(&ftrace_cmd_mutex);
3094
3095         return ret;
3096 }
3097
3098 static int ftrace_process_regex(struct ftrace_hash *hash,
3099                                 char *buff, int len, int enable)
3100 {
3101         char *func, *command, *next = buff;
3102         struct ftrace_func_command *p;
3103         int ret = -EINVAL;
3104
3105         func = strsep(&next, ":");
3106
3107         if (!next) {
3108                 ret = ftrace_match_records(hash, func, len);
3109                 if (!ret)
3110                         ret = -EINVAL;
3111                 if (ret < 0)
3112                         return ret;
3113                 return 0;
3114         }
3115
3116         /* command found */
3117
3118         command = strsep(&next, ":");
3119
3120         mutex_lock(&ftrace_cmd_mutex);
3121         list_for_each_entry(p, &ftrace_commands, list) {
3122                 if (strcmp(p->name, command) == 0) {
3123                         ret = p->func(hash, func, command, next, enable);
3124                         goto out_unlock;
3125                 }
3126         }
3127  out_unlock:
3128         mutex_unlock(&ftrace_cmd_mutex);
3129
3130         return ret;
3131 }
3132
3133 static ssize_t
3134 ftrace_regex_write(struct file *file, const char __user *ubuf,
3135                    size_t cnt, loff_t *ppos, int enable)
3136 {
3137         struct ftrace_iterator *iter;
3138         struct trace_parser *parser;
3139         ssize_t ret, read;
3140
3141         if (!cnt)
3142                 return 0;
3143
3144         mutex_lock(&ftrace_regex_lock);
3145
3146         ret = -ENODEV;
3147         if (unlikely(ftrace_disabled))
3148                 goto out_unlock;
3149
3150         if (file->f_mode & FMODE_READ) {
3151                 struct seq_file *m = file->private_data;
3152                 iter = m->private;
3153         } else
3154                 iter = file->private_data;
3155
3156         parser = &iter->parser;
3157         read = trace_get_user(parser, ubuf, cnt, ppos);
3158
3159         if (read >= 0 && trace_parser_loaded(parser) &&
3160             !trace_parser_cont(parser)) {
3161                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3162                                            parser->idx, enable);
3163                 trace_parser_clear(parser);
3164                 if (ret)
3165                         goto out_unlock;
3166         }
3167
3168         ret = read;
3169 out_unlock:
3170         mutex_unlock(&ftrace_regex_lock);
3171
3172         return ret;
3173 }
3174
3175 ssize_t
3176 ftrace_filter_write(struct file *file, const char __user *ubuf,
3177                     size_t cnt, loff_t *ppos)
3178 {
3179         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3180 }
3181
3182 ssize_t
3183 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3184                      size_t cnt, loff_t *ppos)
3185 {
3186         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3187 }
3188
3189 static int
3190 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3191                  int reset, int enable)
3192 {
3193         struct ftrace_hash **orig_hash;
3194         struct ftrace_hash *hash;
3195         int ret;
3196
3197         /* All global ops use the global ops filters */
3198         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3199                 ops = &global_ops;
3200
3201         if (unlikely(ftrace_disabled))
3202                 return -ENODEV;
3203
3204         if (enable)
3205                 orig_hash = &ops->filter_hash;
3206         else
3207                 orig_hash = &ops->notrace_hash;
3208
3209         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3210         if (!hash)
3211                 return -ENOMEM;
3212
3213         mutex_lock(&ftrace_regex_lock);
3214         if (reset)
3215                 ftrace_filter_reset(hash);
3216         if (buf && !ftrace_match_records(hash, buf, len)) {
3217                 ret = -EINVAL;
3218                 goto out_regex_unlock;
3219         }
3220
3221         mutex_lock(&ftrace_lock);
3222         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3223         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3224             && ftrace_enabled)
3225                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3226
3227         mutex_unlock(&ftrace_lock);
3228
3229  out_regex_unlock:
3230         mutex_unlock(&ftrace_regex_lock);
3231
3232         free_ftrace_hash(hash);
3233         return ret;
3234 }
3235
3236 /**
3237  * ftrace_set_filter - set a function to filter on in ftrace
3238  * @ops - the ops to set the filter with
3239  * @buf - the string that holds the function filter text.
3240  * @len - the length of the string.
3241  * @reset - non zero to reset all filters before applying this filter.
3242  *
3243  * Filters denote which functions should be enabled when tracing is enabled.
3244  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3245  */
3246 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3247                        int len, int reset)
3248 {
3249         return ftrace_set_regex(ops, buf, len, reset, 1);
3250 }
3251 EXPORT_SYMBOL_GPL(ftrace_set_filter);
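
In-kernel users typically set the filter before registering their ops. A minimal sketch, assuming a hypothetical my_ops/my_callback pair with the pre-regs callback signature used throughout this file:

	static void my_callback(unsigned long ip, unsigned long parent_ip)
	{
		/* hit only for functions matching the filter below */
	}

	static struct ftrace_ops my_ops = {
		.func = my_callback,
	};

	static int __init my_tracer_init(void)
	{
		char buf[] = "vfs_*";

		/* reset = 1: start from an empty filter hash */
		ftrace_set_filter(&my_ops, buf, strlen(buf), 1);
		return register_ftrace_function(&my_ops);
	}
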
3252
3253 /**
3254  * ftrace_set_notrace - set a function to not trace in ftrace
3255  * @ops - the ops to set the notrace filter with
3256  * @buf - the string that holds the function notrace text.
3257  * @len - the length of the string.
3258  * @reset - non zero to reset all filters before applying this filter.
3259  *
3260  * Notrace Filters denote which functions should not be enabled when tracing
3261  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3262  * for tracing.
3263  */
3264 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3265                         int len, int reset)
3266 {
3267         return ftrace_set_regex(ops, buf, len, reset, 0);
3268 }
3269 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3270 /**
3271  * ftrace_set_global_filter - set a function to filter on with global tracers
3272  * @buf - the string that holds the function filter text.
3273  * @len - the length of the string.
3274  * @reset - non zero to reset all filters before applying this filter.
3275  *
3276  * Filters denote which functions should be enabled when tracing is enabled.
3277  * If @buf is NULL and reset is set, all functions will be enabled for
3278  * tracing. This variant operates on the shared global_ops.
3279  */
3280 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3281 {
3282         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3283 }
3284 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3285
3286 /**
3287  * ftrace_set_global_notrace - set a function to not trace with global tracers
3288  * @buf - the string that holds the function notrace text.
3289  * @len - the length of the string.
3290  * @reset - non zero to reset all filters before applying this filter.
3291  *
3292  * Notrace filters denote which functions should not be enabled when
3293  * tracing is enabled. If @buf is NULL and reset is set, all functions
3294  * will be enabled for tracing. This variant operates on the shared
3295  * global_ops.
3296  */
3297 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3298 {
3299         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3300 }
3301 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3302
3303 /*
3304  * command line interface to allow users to set filters on boot up.
3305  */
3306 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3307 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3308 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3309
3310 static int __init set_ftrace_notrace(char *str)
3311 {
3312         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3313         return 1;
3314 }
3315 __setup("ftrace_notrace=", set_ftrace_notrace);
3316
3317 static int __init set_ftrace_filter(char *str)
3318 {
3319         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3320         return 1;
3321 }
3322 __setup("ftrace_filter=", set_ftrace_filter);
3323
3324 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3325 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3326 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3327
3328 static int __init set_graph_function(char *str)
3329 {
3330         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3331         return 1;
3332 }
3333 __setup("ftrace_graph_filter=", set_graph_function);
3334
3335 static void __init set_ftrace_early_graph(char *buf)
3336 {
3337         int ret;
3338         char *func;
3339
3340         while (buf) {
3341                 func = strsep(&buf, ",");
3342                 /* we allow only one expression at a time */
3343                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3344                                       func);
3345                 if (ret)
3346                         printk(KERN_DEBUG "ftrace: function %s not "
3347                                           "traceable\n", func);
3348         }
3349 }
3350 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3351
3352 void __init
3353 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3354 {
3355         char *func;
3356
3357         while (buf) {
3358                 func = strsep(&buf, ",");
3359                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3360         }
3361 }
3362
3363 static void __init set_ftrace_early_filters(void)
3364 {
3365         if (ftrace_filter_buf[0])
3366                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3367         if (ftrace_notrace_buf[0])
3368                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3369 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3370         if (ftrace_graph_buf[0])
3371                 set_ftrace_early_graph(ftrace_graph_buf);
3372 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3373 }
3374
3375 int ftrace_regex_release(struct inode *inode, struct file *file)
3376 {
3377         struct seq_file *m = (struct seq_file *)file->private_data;
3378         struct ftrace_iterator *iter;
3379         struct ftrace_hash **orig_hash;
3380         struct trace_parser *parser;
3381         int filter_hash;
3382         int ret;
3383
3384         mutex_lock(&ftrace_regex_lock);
3385         if (file->f_mode & FMODE_READ) {
3386                 iter = m->private;
3387
3388                 seq_release(inode, file);
3389         } else
3390                 iter = file->private_data;
3391
3392         parser = &iter->parser;
3393         if (trace_parser_loaded(parser)) {
3394                 parser->buffer[parser->idx] = 0;
3395                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3396         }
3397
3398         trace_parser_put(parser);
3399
3400         if (file->f_mode & FMODE_WRITE) {
3401                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3402
3403                 if (filter_hash)
3404                         orig_hash = &iter->ops->filter_hash;
3405                 else
3406                         orig_hash = &iter->ops->notrace_hash;
3407
3408                 mutex_lock(&ftrace_lock);
3409                 ret = ftrace_hash_move(iter->ops, filter_hash,
3410                                        orig_hash, iter->hash);
3411                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3412                     && ftrace_enabled)
3413                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3414
3415                 mutex_unlock(&ftrace_lock);
3416         }
3417         free_ftrace_hash(iter->hash);
3418         kfree(iter);
3419
3420         mutex_unlock(&ftrace_regex_lock);
3421         return 0;
3422 }
3423
3424 static const struct file_operations ftrace_avail_fops = {
3425         .open = ftrace_avail_open,
3426         .read = seq_read,
3427         .llseek = seq_lseek,
3428         .release = seq_release_private,
3429 };
3430
3431 static const struct file_operations ftrace_enabled_fops = {
3432         .open = ftrace_enabled_open,
3433         .read = seq_read,
3434         .llseek = seq_lseek,
3435         .release = seq_release_private,
3436 };
3437
3438 static const struct file_operations ftrace_filter_fops = {
3439         .open = ftrace_filter_open,
3440         .read = seq_read,
3441         .write = ftrace_filter_write,
3442         .llseek = ftrace_regex_lseek,
3443         .release = ftrace_regex_release,
3444 };
3445
3446 static const struct file_operations ftrace_notrace_fops = {
3447         .open = ftrace_notrace_open,
3448         .read = seq_read,
3449         .write = ftrace_notrace_write,
3450         .llseek = ftrace_regex_lseek,
3451         .release = ftrace_regex_release,
3452 };
3453
3454 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3455
3456 static DEFINE_MUTEX(graph_lock);
3457
3458 int ftrace_graph_count;
3459 int ftrace_graph_filter_enabled;
3460 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3461
3462 static void *
3463 __g_next(struct seq_file *m, loff_t *pos)
3464 {
3465         if (*pos >= ftrace_graph_count)
3466                 return NULL;
3467         return &ftrace_graph_funcs[*pos];
3468 }
3469
3470 static void *
3471 g_next(struct seq_file *m, void *v, loff_t *pos)
3472 {
3473         (*pos)++;
3474         return __g_next(m, pos);
3475 }
3476
3477 static void *g_start(struct seq_file *m, loff_t *pos)
3478 {
3479         mutex_lock(&graph_lock);
3480
3481         /* Nothing filtered; tell g_show to report that all functions are enabled */
3482         if (!ftrace_graph_filter_enabled && !*pos)
3483                 return (void *)1;
3484
3485         return __g_next(m, pos);
3486 }
3487
3488 static void g_stop(struct seq_file *m, void *p)
3489 {
3490         mutex_unlock(&graph_lock);
3491 }
3492
3493 static int g_show(struct seq_file *m, void *v)
3494 {
3495         unsigned long *ptr = v;
3496
3497         if (!ptr)
3498                 return 0;
3499
3500         if (ptr == (unsigned long *)1) {
3501                 seq_printf(m, "#### all functions enabled ####\n");
3502                 return 0;
3503         }
3504
3505         seq_printf(m, "%ps\n", (void *)*ptr);
3506
3507         return 0;
3508 }
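/*
 * With no graph filter installed, a read of set_graph_function thus
 * shows only the marker printed above, e.g.:
 *
 *      # cat set_graph_function
 *      #### all functions enabled ####
 */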
3509
3510 static const struct seq_operations ftrace_graph_seq_ops = {
3511         .start = g_start,
3512         .next = g_next,
3513         .stop = g_stop,
3514         .show = g_show,
3515 };
3516
3517 static int
3518 ftrace_graph_open(struct inode *inode, struct file *file)
3519 {
3520         int ret = 0;
3521
3522         if (unlikely(ftrace_disabled))
3523                 return -ENODEV;
3524
3525         mutex_lock(&graph_lock);
3526         if ((file->f_mode & FMODE_WRITE) &&
3527             (file->f_flags & O_TRUNC)) {
3528                 ftrace_graph_filter_enabled = 0;
3529                 ftrace_graph_count = 0;
3530                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3531         }
3532         mutex_unlock(&graph_lock);
3533
3534         if (file->f_mode & FMODE_READ)
3535                 ret = seq_open(file, &ftrace_graph_seq_ops);
3536
3537         return ret;
3538 }
3539
3540 static int
3541 ftrace_graph_release(struct inode *inode, struct file *file)
3542 {
3543         if (file->f_mode & FMODE_READ)
3544                 seq_release(inode, file);
3545         return 0;
3546 }
3547
3548 static int
3549 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3550 {
3551         struct dyn_ftrace *rec;
3552         struct ftrace_page *pg;
3553         int search_len;
3554         int fail = 1;
3555         int type, not;
3556         char *search;
3557         bool exists;
3558         int i;
3559
3560         /* decode regex */
3561         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3562         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3563                 return -EBUSY;
3564
3565         search_len = strlen(search);
3566
3567         mutex_lock(&ftrace_lock);
3568
3569         if (unlikely(ftrace_disabled)) {
3570                 mutex_unlock(&ftrace_lock);
3571                 return -ENODEV;
3572         }
3573
3574         do_for_each_ftrace_rec(pg, rec) {
3575
3576                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3577                         /* check if it is already in the array */
3578                         exists = false;
3579                         for (i = 0; i < *idx; i++) {
3580                                 if (array[i] == rec->ip) {
3581                                         exists = true;
3582                                         break;
3583                                 }
3584                         }
3585
3586                         if (!not) {
3587                                 fail = 0;
3588                                 if (!exists) {
3589                                         array[(*idx)++] = rec->ip;
3590                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3591                                                 goto out;
3592                                 }
3593                         } else {
3594                                 if (exists) {
3595                                         array[i] = array[--(*idx)];
3596                                         array[*idx] = 0;
3597                                         fail = 0;
3598                                 }
3599                         }
3600                 }
3601         } while_for_each_ftrace_rec();
3602 out:
3603         mutex_unlock(&ftrace_lock);
3604
3605         if (fail)
3606                 return -EINVAL;
3607
3608         ftrace_graph_filter_enabled = 1;
3609         return 0;
3610 }
3611
3612 static ssize_t
3613 ftrace_graph_write(struct file *file, const char __user *ubuf,
3614                    size_t cnt, loff_t *ppos)
3615 {
3616         struct trace_parser parser;
3617         ssize_t read, ret;
3618
3619         if (!cnt)
3620                 return 0;
3621
3622         mutex_lock(&graph_lock);
3623
3624         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3625                 ret = -ENOMEM;
3626                 goto out_unlock;
3627         }
3628
3629         read = trace_get_user(&parser, ubuf, cnt, ppos);
3630
3631         if (read >= 0 && trace_parser_loaded(&parser)) {
3632                 parser.buffer[parser.idx] = 0;
3633
3634                 /* we allow only one expression at a time */
3635                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3636                                         parser.buffer);
3637                 if (ret)
3638                         goto out_free;
3639         }
3640
3641         ret = read;
3642
3643 out_free:
3644         trace_parser_put(&parser);
3645 out_unlock:
3646         mutex_unlock(&graph_lock);
3647
3648         return ret;
3649 }
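/*
 * Example writes (one expression per write call; glob patterns and
 * '!' negation are decoded by filter_parse_regex() in
 * ftrace_set_func() above):
 *
 *      echo do_IRQ     > set_graph_function   # O_TRUNC resets, then adds
 *      echo 'irq*'    >> set_graph_function   # append a glob match
 *      echo '!do_IRQ' >> set_graph_function   # drop one entry again
 */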
3650
3651 static const struct file_operations ftrace_graph_fops = {
3652         .open           = ftrace_graph_open,
3653         .read           = seq_read,
3654         .write          = ftrace_graph_write,
3655         .release        = ftrace_graph_release,
3656         .llseek         = seq_lseek,
3657 };
3658 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3659
3660 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3661 {
3662
3663         trace_create_file("available_filter_functions", 0444,
3664                         d_tracer, NULL, &ftrace_avail_fops);
3665
3666         trace_create_file("enabled_functions", 0444,
3667                         d_tracer, NULL, &ftrace_enabled_fops);
3668
3669         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3670                         NULL, &ftrace_filter_fops);
3671
3672         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3673                                     NULL, &ftrace_notrace_fops);
3674
3675 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3676         trace_create_file("set_graph_function", 0644, d_tracer,
3677                                     NULL,
3678                                     &ftrace_graph_fops);
3679 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3680
3681         return 0;
3682 }
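/*
 * The files created above appear in the tracing debugfs directory,
 * typically /sys/kernel/debug/tracing. A typical session:
 *
 *      grep sched available_filter_functions
 *      echo 'schedule*' > set_ftrace_filter   # trace only these
 *      echo '*lock*'    > set_ftrace_notrace  # never trace these
 *      cat enabled_functions
 */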
3683
3684 static void ftrace_swap_recs(void *a, void *b, int size)
3685 {
3686         struct dyn_ftrace *reca = a;
3687         struct dyn_ftrace *recb = b;
3688         struct dyn_ftrace t;
3689
3690         t = *reca;
3691         *reca = *recb;
3692         *recb = t;
3693 }
3694
3695 static int ftrace_process_locs(struct module *mod,
3696                                unsigned long *start,
3697                                unsigned long *end)
3698 {
3699         struct ftrace_page *pg;
3700         unsigned long count;
3701         unsigned long *p;
3702         unsigned long addr;
3703         unsigned long flags = 0; /* Shut up gcc */
3704         int ret = -ENOMEM;
3705
3706         count = end - start;
3707
3708         if (!count)
3709                 return 0;
3710
3711         pg = ftrace_allocate_pages(count);
3712         if (!pg)
3713                 return -ENOMEM;
3714
3715         mutex_lock(&ftrace_lock);
3716
3717         /*
3718          * Core and each module needs their own pages, as
3719          * modules will free them when they are removed.
3720          * Force a new page to be allocated for modules.
3721          */
3722         if (!mod) {
3723                 WARN_ON(ftrace_pages || ftrace_pages_start);
3724                 /* First initialization */
3725                 ftrace_pages = ftrace_pages_start = pg;
3726         } else {
3727                 if (!ftrace_pages)
3728                         goto out;
3729
3730                 if (WARN_ON(ftrace_pages->next)) {
3731                         /* Hmm, we have free pages? */
3732                         while (ftrace_pages->next)
3733                                 ftrace_pages = ftrace_pages->next;
3734                 }
3735
3736                 ftrace_pages->next = pg;
3737                 ftrace_pages = pg;
3738         }
3739
3740         p = start;
3741         while (p < end) {
3742                 addr = ftrace_call_adjust(*p++);
3743                 /*
3744                  * Some architecture linkers will pad between
3745                  * the different mcount_loc sections of different
3746                  * object files to satisfy alignments.
3747                  * Skip any NULL pointers.
3748                  */
3749                 if (!addr)
3750                         continue;
3751                 if (!ftrace_record_ip(addr))
3752                         break;
3753         }
3754
3755         /* These new locations need to be initialized */
3756         ftrace_new_pgs = pg;
3757
3758         /* Sort each individual set of pages by ip */
3759         for (; pg; pg = pg->next)
3760                 sort(pg->records, pg->index, sizeof(struct dyn_ftrace),
3761                      ftrace_cmp_recs, ftrace_swap_recs);
3762
3763         /*
3764          * We only need to disable interrupts on start up
3765          * because we are modifying code that an interrupt
3766          * may execute, and the modification is not atomic.
3767          * But for modules, nothing runs the code we modify
3768          * until we are finished with it, and there's no
3769          * reason to cause large interrupt latencies while we do it.
3770          */
3771         if (!mod)
3772                 local_irq_save(flags);
3773         ftrace_update_code(mod);
3774         if (!mod)
3775                 local_irq_restore(flags);
3776         ret = 0;
3777  out:
3778         mutex_unlock(&ftrace_lock);
3779
3780         return ret;
3781 }
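/*
 * The sort() above pairs ftrace_swap_recs() with ftrace_cmp_recs()
 * (defined earlier in this file). For reference, a comparison that
 * orders records by instruction pointer looks like this sketch (a
 * hypothetical name, not necessarily the exact body used here):
 *
 *      static int my_cmp_recs(const void *a, const void *b)
 *      {
 *              const struct dyn_ftrace *ra = a, *rb = b;
 *
 *              if (ra->ip < rb->ip)
 *                      return -1;
 *              return ra->ip > rb->ip;
 *      }
 */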
3782
3783 #ifdef CONFIG_MODULES
3784
3785 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3786
3787 void ftrace_release_mod(struct module *mod)
3788 {
3789         struct dyn_ftrace *rec;
3790         struct ftrace_page **last_pg;
3791         struct ftrace_page *pg;
3792         int order;
3793
3794         mutex_lock(&ftrace_lock);
3795
3796         if (ftrace_disabled)
3797                 goto out_unlock;
3798
3799         /*
3800          * Each module has its own ftrace_pages, remove
3801          * them from the list.
3802          */
3803         last_pg = &ftrace_pages_start;
3804         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
3805                 rec = &pg->records[0];
3806                 if (within_module_core(rec->ip, mod)) {
3807                         /*
3808                          * As core pages are first, the first
3809                          * page should never be a module page.
3810                          */
3811                         if (WARN_ON(pg == ftrace_pages_start))
3812                                 goto out_unlock;
3813
3814                         /* Check if we are deleting the last page */
3815                         if (pg == ftrace_pages)
3816                                 ftrace_pages = next_to_ftrace_page(last_pg);
3817
3818                         *last_pg = pg->next;
3819                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3820                         free_pages((unsigned long)pg->records, order);
3821                         kfree(pg);
3822                 } else
3823                         last_pg = &pg->next;
3824         }
3825  out_unlock:
3826         mutex_unlock(&ftrace_lock);
3827 }
3828
3829 static void ftrace_init_module(struct module *mod,
3830                                unsigned long *start, unsigned long *end)
3831 {
3832         if (ftrace_disabled || start == end)
3833                 return;
3834         ftrace_process_locs(mod, start, end);
3835 }
3836
3837 static int ftrace_module_notify(struct notifier_block *self,
3838                                 unsigned long val, void *data)
3839 {
3840         struct module *mod = data;
3841
3842         switch (val) {
3843         case MODULE_STATE_COMING:
3844                 ftrace_init_module(mod, mod->ftrace_callsites,
3845                                    mod->ftrace_callsites +
3846                                    mod->num_ftrace_callsites);
3847                 break;
3848         case MODULE_STATE_GOING:
3849                 ftrace_release_mod(mod);
3850                 break;
3851         }
3852
3853         return 0;
3854 }
3855 #else
3856 static int ftrace_module_notify(struct notifier_block *self,
3857                                 unsigned long val, void *data)
3858 {
3859         return 0;
3860 }
3861 #endif /* CONFIG_MODULES */
3862
3863 struct notifier_block ftrace_module_nb = {
3864         .notifier_call = ftrace_module_notify,
3865         .priority = 0,
3866 };
3867
3868 extern unsigned long __start_mcount_loc[];
3869 extern unsigned long __stop_mcount_loc[];
3870
3871 void __init ftrace_init(void)
3872 {
3873         unsigned long count, addr, flags;
3874         int ret;
3875
3876         /* Keep the ftrace pointer to the stub */
3877         addr = (unsigned long)ftrace_stub;
3878
3879         local_irq_save(flags);
3880         ftrace_dyn_arch_init(&addr);
3881         local_irq_restore(flags);
3882
3883         /* ftrace_dyn_arch_init places the return code in addr */
3884         if (addr)
3885                 goto failed;
3886
3887         count = __stop_mcount_loc - __start_mcount_loc;
3888
3889         ret = ftrace_dyn_table_alloc(count);
3890         if (ret)
3891                 goto failed;
3892
3893         last_ftrace_enabled = ftrace_enabled = 1;
3894
3895         ret = ftrace_process_locs(NULL,
3896                                   __start_mcount_loc,
3897                                   __stop_mcount_loc);
3898
3899         ret = register_module_notifier(&ftrace_module_nb);
3900         if (ret)
3901                 pr_warning("Failed to register ftrace module notifier\n");
3902
3903         set_ftrace_early_filters();
3904
3905         return;
3906  failed:
3907         ftrace_disabled = 1;
3908 }
3909
3910 #else
3911
3912 static struct ftrace_ops global_ops = {
3913         .func                   = ftrace_stub,
3914 };
3915
3916 static int __init ftrace_nodyn_init(void)
3917 {
3918         ftrace_enabled = 1;
3919         return 0;
3920 }
3921 device_initcall(ftrace_nodyn_init);
3922
3923 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3924 static inline void ftrace_startup_enable(int command) { }
3925 /* Keep as macros so we do not need to define the commands */
3926 # define ftrace_startup(ops, command)                   \
3927         ({                                              \
3928                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
3929                 0;                                      \
3930         })
3931 # define ftrace_shutdown(ops, command)  do { } while (0)
3932 # define ftrace_startup_sysctl()        do { } while (0)
3933 # define ftrace_shutdown_sysctl()       do { } while (0)
3934
3935 static inline int
3936 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3937 {
3938         return 1;
3939 }
3940
3941 #endif /* CONFIG_DYNAMIC_FTRACE */
3942
3943 static void
3944 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
3945 {
3946         struct ftrace_ops *op;
3947
3948         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
3949                 return;
3950
3951         /*
3952          * Some of the ops may be dynamically allocated,
3953          * they must be freed after a synchronize_sched().
3954          */
3955         preempt_disable_notrace();
3956         trace_recursion_set(TRACE_CONTROL_BIT);
3957         op = rcu_dereference_raw(ftrace_control_list);
3958         while (op != &ftrace_list_end) {
3959                 if (!ftrace_function_local_disabled(op) &&
3960                     ftrace_ops_test(op, ip))
3961                         op->func(ip, parent_ip);
3962
3963                 op = rcu_dereference_raw(op->next);
3964         }
3965         trace_recursion_clear(TRACE_CONTROL_BIT);
3966         preempt_enable_notrace();
3967 }
3968
3969 static struct ftrace_ops control_ops = {
3970         .func = ftrace_ops_control_func,
3971 };
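/*
 * Example: an ops registered with FTRACE_OPS_FL_CONTROL is walked by
 * ftrace_ops_control_func() above and can be soft-disabled per cpu
 * without being unregistered. A minimal sketch, assuming a
 * caller-defined notrace callback named my_func:
 *
 *      static struct ftrace_ops my_ops = {
 *              .func  = my_func,
 *              .flags = FTRACE_OPS_FL_CONTROL,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ftrace_function_local_disable(&my_ops); // skip on this cpu
 *      ftrace_function_local_enable(&my_ops);  // resume on this cpu
 */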
3972
3973 static void
3974 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3975 {
3976         struct ftrace_ops *op;
3977
3978         if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3979                 return;
3980
3981         trace_recursion_set(TRACE_INTERNAL_BIT);
3982         /*
3983          * Some of the ops may be dynamically allocated,
3984          * they must be freed after a synchronize_sched().
3985          */
3986         preempt_disable_notrace();
3987         op = rcu_dereference_raw(ftrace_ops_list);
3988         while (op != &ftrace_list_end) {
3989                 if (ftrace_ops_test(op, ip))
3990                         op->func(ip, parent_ip);
3991                 op = rcu_dereference_raw(op->next);
3992         }
3993         preempt_enable_notrace();
3994         trace_recursion_clear(TRACE_INTERNAL_BIT);
3995 }
3996
3997 static void clear_ftrace_swapper(void)
3998 {
3999         struct task_struct *p;
4000         int cpu;
4001
4002         get_online_cpus();
4003         for_each_online_cpu(cpu) {
4004                 p = idle_task(cpu);
4005                 clear_tsk_trace_trace(p);
4006         }
4007         put_online_cpus();
4008 }
4009
4010 static void set_ftrace_swapper(void)
4011 {
4012         struct task_struct *p;
4013         int cpu;
4014
4015         get_online_cpus();
4016         for_each_online_cpu(cpu) {
4017                 p = idle_task(cpu);
4018                 set_tsk_trace_trace(p);
4019         }
4020         put_online_cpus();
4021 }
4022
4023 static void clear_ftrace_pid(struct pid *pid)
4024 {
4025         struct task_struct *p;
4026
4027         rcu_read_lock();
4028         do_each_pid_task(pid, PIDTYPE_PID, p) {
4029                 clear_tsk_trace_trace(p);
4030         } while_each_pid_task(pid, PIDTYPE_PID, p);
4031         rcu_read_unlock();
4032
4033         put_pid(pid);
4034 }
4035
4036 static void set_ftrace_pid(struct pid *pid)
4037 {
4038         struct task_struct *p;
4039
4040         rcu_read_lock();
4041         do_each_pid_task(pid, PIDTYPE_PID, p) {
4042                 set_tsk_trace_trace(p);
4043         } while_each_pid_task(pid, PIDTYPE_PID, p);
4044         rcu_read_unlock();
4045 }
4046
4047 static void clear_ftrace_pid_task(struct pid *pid)
4048 {
4049         if (pid == ftrace_swapper_pid)
4050                 clear_ftrace_swapper();
4051         else
4052                 clear_ftrace_pid(pid);
4053 }
4054
4055 static void set_ftrace_pid_task(struct pid *pid)
4056 {
4057         if (pid == ftrace_swapper_pid)
4058                 set_ftrace_swapper();
4059         else
4060                 set_ftrace_pid(pid);
4061 }
4062
4063 static int ftrace_pid_add(int p)
4064 {
4065         struct pid *pid;
4066         struct ftrace_pid *fpid;
4067         int ret = -EINVAL;
4068
4069         mutex_lock(&ftrace_lock);
4070
4071         if (!p)
4072                 pid = ftrace_swapper_pid;
4073         else
4074                 pid = find_get_pid(p);
4075
4076         if (!pid)
4077                 goto out;
4078
4079         ret = 0;
4080
4081         list_for_each_entry(fpid, &ftrace_pids, list)
4082                 if (fpid->pid == pid)
4083                         goto out_put;
4084
4085         ret = -ENOMEM;
4086
4087         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4088         if (!fpid)
4089                 goto out_put;
4090
4091         list_add(&fpid->list, &ftrace_pids);
4092         fpid->pid = pid;
4093
4094         set_ftrace_pid_task(pid);
4095
4096         ftrace_update_pid_func();
4097         ftrace_startup_enable(0);
4098
4099         mutex_unlock(&ftrace_lock);
4100         return 0;
4101
4102 out_put:
4103         if (pid != ftrace_swapper_pid)
4104                 put_pid(pid);
4105
4106 out:
4107         mutex_unlock(&ftrace_lock);
4108         return ret;
4109 }
4110
4111 static void ftrace_pid_reset(void)
4112 {
4113         struct ftrace_pid *fpid, *safe;
4114
4115         mutex_lock(&ftrace_lock);
4116         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4117                 struct pid *pid = fpid->pid;
4118
4119                 clear_ftrace_pid_task(pid);
4120
4121                 list_del(&fpid->list);
4122                 kfree(fpid);
4123         }
4124
4125         ftrace_update_pid_func();
4126         ftrace_startup_enable(0);
4127
4128         mutex_unlock(&ftrace_lock);
4129 }
4130
4131 static void *fpid_start(struct seq_file *m, loff_t *pos)
4132 {
4133         mutex_lock(&ftrace_lock);
4134
4135         if (list_empty(&ftrace_pids) && (!*pos))
4136                 return (void *) 1;
4137
4138         return seq_list_start(&ftrace_pids, *pos);
4139 }
4140
4141 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4142 {
4143         if (v == (void *)1)
4144                 return NULL;
4145
4146         return seq_list_next(v, &ftrace_pids, pos);
4147 }
4148
4149 static void fpid_stop(struct seq_file *m, void *p)
4150 {
4151         mutex_unlock(&ftrace_lock);
4152 }
4153
4154 static int fpid_show(struct seq_file *m, void *v)
4155 {
4156         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4157
4158         if (v == (void *)1) {
4159                 seq_printf(m, "no pid\n");
4160                 return 0;
4161         }
4162
4163         if (fpid->pid == ftrace_swapper_pid)
4164                 seq_printf(m, "swapper tasks\n");
4165         else
4166                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4167
4168         return 0;
4169 }
4170
4171 static const struct seq_operations ftrace_pid_sops = {
4172         .start = fpid_start,
4173         .next = fpid_next,
4174         .stop = fpid_stop,
4175         .show = fpid_show,
4176 };
4177
4178 static int
4179 ftrace_pid_open(struct inode *inode, struct file *file)
4180 {
4181         int ret = 0;
4182
4183         if ((file->f_mode & FMODE_WRITE) &&
4184             (file->f_flags & O_TRUNC))
4185                 ftrace_pid_reset();
4186
4187         if (file->f_mode & FMODE_READ)
4188                 ret = seq_open(file, &ftrace_pid_sops);
4189
4190         return ret;
4191 }
4192
4193 static ssize_t
4194 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4195                    size_t cnt, loff_t *ppos)
4196 {
4197         char buf[64], *tmp;
4198         long val;
4199         int ret;
4200
4201         if (cnt >= sizeof(buf))
4202                 return -EINVAL;
4203
4204         if (copy_from_user(&buf, ubuf, cnt))
4205                 return -EFAULT;
4206
4207         buf[cnt] = 0;
4208
4209         /*
4210          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4211          * to clean the filter quietly.
4212          */
4213         tmp = strstrip(buf);
4214         if (strlen(tmp) == 0)
4215                 return 1;
4216
4217         ret = strict_strtol(tmp, 10, &val);
4218         if (ret < 0)
4219                 return ret;
4220
4221         ret = ftrace_pid_add(val);
4222
4223         return ret ? ret : cnt;
4224 }
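/*
 * Example usage of the set_ftrace_pid file that this handler backs
 * (paths assume debugfs mounted at /sys/kernel/debug):
 *
 *      echo 1234 > tracing/set_ftrace_pid   # trace pid 1234 only
 *      echo 0    > tracing/set_ftrace_pid   # trace the idle tasks
 *      echo      > tracing/set_ftrace_pid   # clear quietly, as above
 */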
4225
4226 static int
4227 ftrace_pid_release(struct inode *inode, struct file *file)
4228 {
4229         if (file->f_mode & FMODE_READ)
4230                 seq_release(inode, file);
4231
4232         return 0;
4233 }
4234
4235 static const struct file_operations ftrace_pid_fops = {
4236         .open           = ftrace_pid_open,
4237         .write          = ftrace_pid_write,
4238         .read           = seq_read,
4239         .llseek         = seq_lseek,
4240         .release        = ftrace_pid_release,
4241 };
4242
4243 static __init int ftrace_init_debugfs(void)
4244 {
4245         struct dentry *d_tracer;
4246
4247         d_tracer = tracing_init_dentry();
4248         if (!d_tracer)
4249                 return 0;
4250
4251         ftrace_init_dyn_debugfs(d_tracer);
4252
4253         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4254                             NULL, &ftrace_pid_fops);
4255
4256         ftrace_profile_debugfs(d_tracer);
4257
4258         return 0;
4259 }
4260 fs_initcall(ftrace_init_debugfs);
4261
4262 /**
4263  * ftrace_kill - kill ftrace
4264  *
4265  * This function should be used by panic code. It stops ftrace
4266  * but in a not so nice way: it takes no locks, so it is safe
4267  * to call even from atomic context.
4268  */
4269 void ftrace_kill(void)
4270 {
4271         ftrace_disabled = 1;
4272         ftrace_enabled = 0;
4273         clear_ftrace_function();
4274 }
4275
4276 /**
4277  * ftrace_is_dead - Test if ftrace is dead or not.
4278  */
4279 int ftrace_is_dead(void)
4280 {
4281         return ftrace_disabled;
4282 }
4283
4284 /**
4285  * register_ftrace_function - register a function for profiling
4286  * @ops: ops structure that holds the function for profiling.
4287  *
4288  * Register a function to be called by all functions in the
4289  * kernel.
4290  *
4291  * Note: @ops->func and all the functions it calls must be labeled
4292  *       with "notrace", otherwise it will go into a
4293  *       recursive loop.
4294  */
4295 int register_ftrace_function(struct ftrace_ops *ops)
4296 {
4297         int ret = -ENODEV;
4298
4299         mutex_lock(&ftrace_lock);
4300
4301         if (unlikely(ftrace_disabled))
4302                 goto out_unlock;
4303
4304         ret = __register_ftrace_function(ops);
4305         if (!ret)
4306                 ret = ftrace_startup(ops, 0);
4307
4308
4309  out_unlock:
4310         mutex_unlock(&ftrace_lock);
4311         return ret;
4312 }
4313 EXPORT_SYMBOL_GPL(register_ftrace_function);
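/*
 * Example: a minimal registration sketch (my_callback is a
 * caller-supplied name, matching the ftrace_func_t signature used
 * throughout this file):
 *
 *      static void notrace my_callback(unsigned long ip,
 *                                      unsigned long parent_ip)
 *      {
 *              // runs for every traced function; must not recurse
 *      }
 *
 *      static struct ftrace_ops my_ops = {
 *              .func = my_callback,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 */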
4314
4315 /**
4316  * unregister_ftrace_function - unregister a function for profiling.
4317  * @ops: ops structure that holds the function to unregister
4318  *
4319  * Unregister a function that was added to be called by ftrace profiling.
4320  */
4321 int unregister_ftrace_function(struct ftrace_ops *ops)
4322 {
4323         int ret;
4324
4325         mutex_lock(&ftrace_lock);
4326         ret = __unregister_ftrace_function(ops);
4327         if (!ret)
4328                 ftrace_shutdown(ops, 0);
4329         mutex_unlock(&ftrace_lock);
4330
4331         return ret;
4332 }
4333 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4334
4335 int
4336 ftrace_enable_sysctl(struct ctl_table *table, int write,
4337                      void __user *buffer, size_t *lenp,
4338                      loff_t *ppos)
4339 {
4340         int ret = -ENODEV;
4341
4342         mutex_lock(&ftrace_lock);
4343
4344         if (unlikely(ftrace_disabled))
4345                 goto out;
4346
4347         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4348
4349         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4350                 goto out;
4351
4352         last_ftrace_enabled = !!ftrace_enabled;
4353
4354         if (ftrace_enabled) {
4355
4356                 ftrace_startup_sysctl();
4357
4358                 /* we are starting ftrace again */
4359                 if (ftrace_ops_list != &ftrace_list_end) {
4360                         if (ftrace_ops_list->next == &ftrace_list_end)
4361                                 ftrace_trace_function = ftrace_ops_list->func;
4362                         else
4363                                 ftrace_trace_function = ftrace_ops_list_func;
4364                 }
4365
4366         } else {
4367                 /* stopping ftrace calls (just send to ftrace_stub) */
4368                 ftrace_trace_function = ftrace_stub;
4369
4370                 ftrace_shutdown_sysctl();
4371         }
4372
4373  out:
4374         mutex_unlock(&ftrace_lock);
4375         return ret;
4376 }
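/*
 * The handler above backs the kernel.ftrace_enabled sysctl, e.g.:
 *
 *      echo 0 > /proc/sys/kernel/ftrace_enabled   # stub out callbacks
 *      echo 1 > /proc/sys/kernel/ftrace_enabled   # restore them
 */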
4377
4378 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4379
4380 static int ftrace_graph_active;
4381 static struct notifier_block ftrace_suspend_notifier;
4382
4383 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4384 {
4385         return 0;
4386 }
4387
4388 /* The callbacks that hook a function */
4389 trace_func_graph_ret_t ftrace_graph_return =
4390                         (trace_func_graph_ret_t)ftrace_stub;
4391 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4392
4393 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4394 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4395 {
4396         int i;
4397         int ret = 0;
4398         unsigned long flags;
4399         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4400         struct task_struct *g, *t;
4401
4402         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4403                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4404                                         * sizeof(struct ftrace_ret_stack),
4405                                         GFP_KERNEL);
4406                 if (!ret_stack_list[i]) {
4407                         start = 0;
4408                         end = i;
4409                         ret = -ENOMEM;
4410                         goto free;
4411                 }
4412         }
4413
4414         read_lock_irqsave(&tasklist_lock, flags);
4415         do_each_thread(g, t) {
4416                 if (start == end) {
4417                         ret = -EAGAIN;
4418                         goto unlock;
4419                 }
4420
4421                 if (t->ret_stack == NULL) {
4422                         atomic_set(&t->tracing_graph_pause, 0);
4423                         atomic_set(&t->trace_overrun, 0);
4424                         t->curr_ret_stack = -1;
4425                         /* Make sure the tasks see the -1 first: */
4426                         smp_wmb();
4427                         t->ret_stack = ret_stack_list[start++];
4428                 }
4429         } while_each_thread(g, t);
4430
4431 unlock:
4432         read_unlock_irqrestore(&tasklist_lock, flags);
4433 free:
4434         for (i = start; i < end; i++)
4435                 kfree(ret_stack_list[i]);
4436         return ret;
4437 }
4438
4439 static void
4440 ftrace_graph_probe_sched_switch(void *ignore,
4441                         struct task_struct *prev, struct task_struct *next)
4442 {
4443         unsigned long long timestamp;
4444         int index;
4445
4446         /*
4447          * Does the user want to count the time a function was asleep?
4448          * If so, do not update the time stamps.
4449          */
4450         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4451                 return;
4452
4453         timestamp = trace_clock_local();
4454
4455         prev->ftrace_timestamp = timestamp;
4456
4457         /* only process tasks that we timestamped */
4458         if (!next->ftrace_timestamp)
4459                 return;
4460
4461         /*
4462          * Update all the counters in next to make up for the
4463          * time next was sleeping.
4464          */
4465         timestamp -= next->ftrace_timestamp;
4466
4467         for (index = next->curr_ret_stack; index >= 0; index--)
4468                 next->ret_stack[index].calltime += timestamp;
4469 }
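/*
 * The early return above is driven by the "sleep-time" trace option;
 * clearing it makes this probe subtract sleep time from the graph
 * timings, e.g.:
 *
 *      echo 0 > /sys/kernel/debug/tracing/options/sleep-time
 */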
4470
4471 /* Allocate a return stack for each task */
4472 static int start_graph_tracing(void)
4473 {
4474         struct ftrace_ret_stack **ret_stack_list;
4475         int ret, cpu;
4476
4477         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4478                                 sizeof(struct ftrace_ret_stack *),
4479                                 GFP_KERNEL);
4480
4481         if (!ret_stack_list)
4482                 return -ENOMEM;
4483
4484         /* The idle tasks' ret_stack allocated here will never be freed */
4485         for_each_online_cpu(cpu) {
4486                 if (!idle_task(cpu)->ret_stack)
4487                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4488         }
4489
4490         do {
4491                 ret = alloc_retstack_tasklist(ret_stack_list);
4492         } while (ret == -EAGAIN);
4493
4494         if (!ret) {
4495                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4496                 if (ret)
4497                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4498                                 " probe to kernel_sched_switch\n");
4499         }
4500
4501         kfree(ret_stack_list);
4502         return ret;
4503 }
4504
4505 /*
4506  * Hibernation protection.
4507  * The state of the current task is too unstable during
4508  * suspend/restore to disk. We want to protect against that.
4509  */
4510 static int
4511 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4512                                                         void *unused)
4513 {
4514         switch (state) {
4515         case PM_HIBERNATION_PREPARE:
4516                 pause_graph_tracing();
4517                 break;
4518
4519         case PM_POST_HIBERNATION:
4520                 unpause_graph_tracing();
4521                 break;
4522         }
4523         return NOTIFY_DONE;
4524 }
4525
4526 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4527                         trace_func_graph_ent_t entryfunc)
4528 {
4529         int ret = 0;
4530
4531         mutex_lock(&ftrace_lock);
4532
4533         /* we currently allow only one tracer registered at a time */
4534         if (ftrace_graph_active) {
4535                 ret = -EBUSY;
4536                 goto out;
4537         }
4538
4539         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4540         register_pm_notifier(&ftrace_suspend_notifier);
4541
4542         ftrace_graph_active++;
4543         ret = start_graph_tracing();
4544         if (ret) {
4545                 ftrace_graph_active--;
4546                 goto out;
4547         }
4548
4549         ftrace_graph_return = retfunc;
4550         ftrace_graph_entry = entryfunc;
4551
4552         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4553
4554 out:
4555         mutex_unlock(&ftrace_lock);
4556         return ret;
4557 }
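/*
 * Example: a minimal graph-tracer registration sketch, assuming
 * caller-defined entry/return handlers with the signatures used above:
 *
 *      static int my_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return 1;       // nonzero: trace this function
 *      }
 *
 *      static void my_return(struct ftrace_graph_ret *trace)
 *      {
 *      }
 *
 *      register_ftrace_graph(my_return, my_entry);
 */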
4558
4559 void unregister_ftrace_graph(void)
4560 {
4561         mutex_lock(&ftrace_lock);
4562
4563         if (unlikely(!ftrace_graph_active))
4564                 goto out;
4565
4566         ftrace_graph_active--;
4567         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4568         ftrace_graph_entry = ftrace_graph_entry_stub;
4569         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4570         unregister_pm_notifier(&ftrace_suspend_notifier);
4571         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4572
4573  out:
4574         mutex_unlock(&ftrace_lock);
4575 }
4576
4577 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4578
4579 static void
4580 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4581 {
4582         atomic_set(&t->tracing_graph_pause, 0);
4583         atomic_set(&t->trace_overrun, 0);
4584         t->ftrace_timestamp = 0;
4585         /* make curr_ret_stack visible before we add the ret_stack */
4586         smp_wmb();
4587         t->ret_stack = ret_stack;
4588 }
4589
4590 /*
4591  * Allocate a return stack for the idle task. May be the first
4592  * time through, or it may be done by CPU hotplug online.
4593  */
4594 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4595 {
4596         t->curr_ret_stack = -1;
4597         /*
4598          * The idle task has no parent, it either has its own
4599          * stack or no stack at all.
4600          */
4601         if (t->ret_stack)
4602                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4603
4604         if (ftrace_graph_active) {
4605                 struct ftrace_ret_stack *ret_stack;
4606
4607                 ret_stack = per_cpu(idle_ret_stack, cpu);
4608                 if (!ret_stack) {
4609                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4610                                             * sizeof(struct ftrace_ret_stack),
4611                                             GFP_KERNEL);
4612                         if (!ret_stack)
4613                                 return;
4614                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4615                 }
4616                 graph_init_task(t, ret_stack);
4617         }
4618 }
4619
4620 /* Allocate a return stack for newly created task */
4621 void ftrace_graph_init_task(struct task_struct *t)
4622 {
4623         /* Make sure we do not use the parent ret_stack */
4624         t->ret_stack = NULL;
4625         t->curr_ret_stack = -1;
4626
4627         if (ftrace_graph_active) {
4628                 struct ftrace_ret_stack *ret_stack;
4629
4630                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4631                                 * sizeof(struct ftrace_ret_stack),
4632                                 GFP_KERNEL);
4633                 if (!ret_stack)
4634                         return;
4635                 graph_init_task(t, ret_stack);
4636         }
4637 }
4638
4639 void ftrace_graph_exit_task(struct task_struct *t)
4640 {
4641         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4642
4643         t->ret_stack = NULL;
4644         /* NULL must become visible to IRQs before we free it: */
4645         barrier();
4646
4647         kfree(ret_stack);
4648 }
4649
4650 void ftrace_graph_stop(void)
4651 {
4652         ftrace_stop();
4653 }
4654 #endif