/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;
        struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
};

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
static void ftrace_global_list_func(unsigned long ip,
                                    unsigned long parent_ip)
{
        struct ftrace_ops *op;

        if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
                return;

        trace_recursion_set(TRACE_GLOBAL_BIT);
        op = rcu_dereference_raw(ftrace_global_list); /*see above*/
        while (op != &ftrace_list_end) {
                op->func(ip, parent_ip);
                op = rcu_dereference_raw(op->next); /*see above*/
        }
        trace_recursion_clear(TRACE_GLOBAL_BIT);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
        if (!test_tsk_trace_trace(current))
                return;

        ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all callers stop
 * using the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function_delay = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif

static void control_ops_disable_all(struct ftrace_ops *ops)
{
        int cpu;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
        int __percpu *disabled;

        disabled = alloc_percpu(int);
        if (!disabled)
                return -ENOMEM;

        ops->disabled = disabled;
        control_ops_disable_all(ops);
        return 0;
}

static void control_ops_free(struct ftrace_ops *ops)
{
        free_percpu(ops->disabled);
}

static void update_global_ops(void)
{
        ftrace_func_t func;

        /*
         * If there's only one function registered, then call that
         * function directly. Otherwise, we need to iterate over the
         * registered callers.
         */
        if (ftrace_global_list == &ftrace_list_end ||
            ftrace_global_list->next == &ftrace_list_end)
                func = ftrace_global_list->func;
        else
                func = ftrace_global_list_func;

        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        }

        global_ops.func = func;
}

static void update_ftrace_function(void)
{
        ftrace_func_t func;

        update_global_ops();

        /*
         * If we are at the end of the list and this ops is
         * not dynamic, then have the mcount trampoline call
         * the function directly
         */
        if (ftrace_ops_list == &ftrace_list_end ||
            (ftrace_ops_list->next == &ftrace_list_end &&
             !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
                func = ftrace_ops_list->func;
        else
                func = ftrace_ops_list_func;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
#else
#ifdef CONFIG_DYNAMIC_FTRACE
        /* do not update till all functions have been modified */
        __ftrace_trace_function_delay = func;
#else
        __ftrace_trace_function = func;
#endif
        ftrace_trace_function =
                (func == ftrace_stub) ? func : ftrace_test_stop_func;
#endif
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        ops->next = *list;
        /*
         * We are entering ops into the list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer inserted into the list.
         */
        rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (*list == ops && ops->next == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
                                struct ftrace_ops *main_ops,
                                struct ftrace_ops *ops)
{
        int first = *list == &ftrace_list_end;
        add_ftrace_ops(list, ops);
        if (first)
                add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
                                  struct ftrace_ops *main_ops,
                                  struct ftrace_ops *ops)
{
        int ret = remove_ftrace_ops(list, ops);
        if (!ret && *list == &ftrace_list_end)
                ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
        return ret;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (ftrace_disabled)
                return -ENODEV;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

        /* We don't support both control and global flags set. */
        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
                return -EINVAL;

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
                ops->flags |= FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (ftrace_disabled)
                return -ENODEV;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                ret = remove_ftrace_list_ops(&ftrace_global_list,
                                             &global_ops, ops);
                if (!ret)
                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
                if (!ret) {
                        /*
                         * The ftrace_ops is now removed from the list,
                         * so there'll be no new users. We must ensure
                         * all current users are done before we free
                         * the control data.
                         */
                        synchronize_sched();
                        control_ops_free(ops);
                }
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();
        /*
         * Dynamic ops may be freed; we must make sure that all
         * callers are done before leaving this function.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
                synchronize_sched();

        return 0;
}
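
/*
 * Illustrative sketch, not part of the original file: what a minimal
 * user of the registration code above looks like. The callback uses
 * the two-argument signature this file expects; my_trace_func and
 * my_ops are hypothetical names.
 */
#if 0	/* example only */
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
        /* Called on every traced function entry; must be fast and
         * must not recurse into traced code. */
}

static struct ftrace_ops my_ops __read_mostly = {
        .func           = my_trace_func,
};

/* From module init: register_ftrace_function(&my_ops);    */
/* From module exit: unregister_ftrace_function(&my_ops);  */
#endif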

static void ftrace_update_pid_func(void)
{
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
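
/*
 * Worked example (illustrative, not from the original file): on a
 * 64-bit kernel with 4K pages and CONFIG_FUNCTION_GRAPH_TRACER set,
 * struct ftrace_profile is 48 bytes (16 for the hlist_node plus four
 * 8-byte fields) and the page header (next + index) is 16 bytes, so
 * PROFILES_PER_PAGE = (4096 - 16) / 48 = 85 records per page.
 */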

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;
        else
                return 0;
}
#else
/* without function graph, compare against hits */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;
        else
                return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "  Function                               "
                   "Hit    Time            Avg             s^2\n"
                      "  --------                               "
                   "---    ----            ---             ---\n");
#else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "    ");
        avg = rec->time;
        do_div(avg, rec->counter);

        /* Sample variance (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                /*
                 * s^2 = (sum(x^2) - n * avg^2) / (n - 1), with the
                 * times in nanoseconds.
                 */
                stddev = rec->time_squared - rec->counter * avg * avg;
                /*
                 * Divide by only 1000 for the ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide by 1000 again.
                 */
                do_div(stddev, (rec->counter - 1) * 1000);
        }

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. With past experience
         * we have around 20K functions. That should be more than enough.
         * It is highly unlikely we will execute every function in
         * the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 0; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        free_page((unsigned long)stat->pages);
        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        if (!ftrace_profile_bits) {
                size--;

                for (; size; size >>= 1)
                        ftrace_profile_bits++;
        }

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_online_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        struct hlist_node *n;
        unsigned long key;

        key = hash_long(ip, ftrace_profile_bits);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu(rec, n, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, ftrace_profile_bits);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record
 * to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0);
        return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zeroed out, ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler() calls stop_machine(),
                         * so this acts like a synchronize_sched().
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};
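
/*
 * Usage example (illustrative): from user space the profiler is
 * driven through these debugfs files, e.g.:
 *
 *   echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *   cat /sys/kernel/debug/tracing/trace_stat/function0
 *   echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 */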

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The stat files we create are permanent; even if
                         * something fails here, already-allocated memory
                         * is not freed.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = debugfs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct rcu_head         rcu;
};

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
        struct rcu_head         rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
};

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     size;
};

static struct ftrace_page *ftrace_new_pgs;

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page      *ftrace_pages_start;
static struct ftrace_page      *ftrace_pages;

static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
        return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;
        struct hlist_node *n;

        if (ftrace_hash_empty(hash))
                return NULL;

        if (hash->size_bits > 0)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
{
        struct hlist_head *hhd;
        unsigned long key;

        if (hash->size_bits)
                key = hash_long(entry->ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->ip = ip;
        __add_hash_entry(hash, entry);

        return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tp, *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        if (!hash->count)
                return;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        ftrace_hash_clear(hash);
        kfree(hash->buckets);
        kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
        struct ftrace_hash *hash;

        hash = container_of(rcu, struct ftrace_hash, rcu);
        free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
        struct ftrace_hash *hash;
        int size;

        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
        if (!hash)
                return NULL;

        size = 1 << size_bits;
        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

        if (!hash->buckets) {
                kfree(hash);
                return NULL;
        }

        hash->size_bits = size_bits;

        return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        struct hlist_node *tp;
        int size;
        int ret;
        int i;

        new_hash = alloc_ftrace_hash(size_bits);
        if (!new_hash)
                return NULL;

        /* Empty hash? */
        if (ftrace_hash_empty(hash))
                return new_hash;

        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
                }
        }

        FTRACE_WARN_ON(new_hash->count != hash->count);

        return new_hash;

 free_hash:
        free_ftrace_hash(new_hash);
        return NULL;
}
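
/*
 * Illustrative sketch, not part of the original file: building a
 * small hash with the helpers above and looking it up. Lookups may
 * run under RCU; modifications require the ftrace locks.
 */
#if 0	/* example only */
static int example_hash_usage(unsigned long ip)
{
        struct ftrace_hash *hash;
        int ret;

        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
        if (!hash)
                return -ENOMEM;

        ret = add_hash_entry(hash, ip);
        if (!ret && !ftrace_lookup_ip(hash, ip))
                ret = -EINVAL;  /* should not happen */

        free_ftrace_hash(hash);
        return ret;
}
#endif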

static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
{
        struct ftrace_func_entry *entry;
        struct hlist_node *tp, *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
        unsigned long key;
        int size = src->count;
        int bits = 0;
        int ret;
        int i;

        /*
         * Remove the current set, update the hash and add
         * them back.
         */
        ftrace_hash_rec_disable(ops, enable);

        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
                /* still need to update the function records */
                ret = 0;
                goto out;
        }

        /*
         * Make the hash size about 1/2 the # found
         */
        for (size /= 2; size; size >>= 1)
                bits++;

        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;

        ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
                goto out;

        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
                        if (bits > 0)
                                key = hash_long(entry->ip, bits);
                        else
                                key = 0;
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
        }

        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);

        ret = 0;
 out:
        /*
         * Enable regardless of ret:
         *  On success, we enable the new hash.
         *  On failure, we re-enable the original hash.
         */
        ftrace_hash_rec_enable(ops, enable);

        return ret;
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
        struct ftrace_hash *filter_hash;
        struct ftrace_hash *notrace_hash;
        int ret;

        filter_hash = rcu_dereference_raw(ops->filter_hash);
        notrace_hash = rcu_dereference_raw(ops->notrace_hash);

        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
            (ftrace_hash_empty(notrace_hash) ||
             !ftrace_lookup_ip(notrace_hash, ip)))
                ret = 1;
        else
                ret = 0;

        return ret;
}
/*
 * This is a double for loop. Do not use 'break' to get out of it;
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }
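
/*
 * Illustrative sketch, not part of the original file: visiting every
 * record with the macros above; note the goto for early exit, per the
 * comment preceding the macros.
 */
#if 0	/* example only */
static int example_count_enabled(void)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int count = 0;

        do_for_each_ftrace_rec(pg, rec) {
                if (ftrace_disabled)
                        goto out;       /* never 'break' out of this loop */
                if (rec->flags & FTRACE_FL_ENABLED)
                        count++;
        } while_for_each_ftrace_rec();
 out:
        return count;
}
#endif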


static int ftrace_cmp_recs(const void *a, const void *b)
{
        const struct dyn_ftrace *reca = a;
        const struct dyn_ftrace *recb = b;

        if (reca->ip > recb->ip)
                return 1;
        if (reca->ip < recb->ip)
                return -1;
        return 0;
}

/**
 * ftrace_location - return true if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns 1 if the given @ip is a pointer to an ftrace location;
 * that is, an instruction that is either a NOP or a call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_location(unsigned long ip)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;

        key.ip = ip;

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
                        return 1;
        }

        return 0;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
{
        struct ftrace_hash *hash;
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int count = 0;
        int all = 0;

        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return;

        /*
         * In the filter_hash case:
         *   If the count is zero, we update all records.
         *   Otherwise we just update the items in the hash.
         *
         * In the notrace_hash case:
         *   We enable the update in the hash.
         *   As disabling notrace means enabling the tracing,
         *   and enabling notrace means disabling, the inc variable
         *   gets inverted.
         */
        if (filter_hash) {
                hash = ops->filter_hash;
                other_hash = ops->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
                hash = ops->notrace_hash;
                other_hash = ops->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
                 */
                if (ftrace_hash_empty(hash))
                        return;
        }

        do_for_each_ftrace_rec(pg, rec) {
                int in_other_hash = 0;
                int in_hash = 0;
                int match = 0;

                if (all) {
                        /*
                         * Only the filter_hash affects all records.
                         * Update if the record is not in the notrace hash.
                         */
                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
                                match = 1;
                } else {
                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

                        /*
                         * If filter_hash is set, we want to match all
                         * functions that are in this hash but not in
                         * the other (notrace) hash.
                         *
                         * If filter_hash is not set, we are updating
                         * the notrace hash: match functions that are
                         * in this hash and either in the other (filter)
                         * hash, or the filter hash is empty (all
                         * functions are traced).
                         */
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
                        else if (!filter_hash && in_hash &&
                                 (in_other_hash || ftrace_hash_empty(other_hash)))
                                match = 1;
                }
                if (!match)
                        continue;

                if (inc) {
                        rec->flags++;
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
                                return;
                } else {
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
                                return;
                        rec->flags--;
                }
                count++;
                /* Shortcut, if we handled all records, we are done. */
                if (!all && count == hash->count)
                        return;
        } while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
                                    int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
                                   int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        if (ftrace_pages->index == ftrace_pages->size) {
                /* We should have allocated enough */
                if (WARN_ON(!ftrace_pages->next))
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;

        return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, unsigned long ip)
{
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
                print_ip_sym(ip);
                break;
        case -EINVAL:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
                printk(KERN_CONT "\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
                print_ip_sym(ip);
                break;
        default:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
}


/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        do_for_each_ftrace_rec(pg, rec) {
                if (rec->ip <= (unsigned long)end &&
                    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
                        return 1;
        } while_for_each_ftrace_rec();
        return 0;
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
        unsigned long flag = 0UL;

        /*
         * If we are updating calls:
         *
         *   If the record has a ref count, then we need to enable it
         *   because someone is using it.
         *
         *   Otherwise we make sure it's disabled.
         *
         * If we are disabling calls, then disable all records that
         * are enabled.
         */
        if (enable && (rec->flags & ~FTRACE_FL_MASK))
                flag = FTRACE_FL_ENABLED;

        /* If the state of this record hasn't changed, then do nothing */
        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
                return FTRACE_UPDATE_IGNORE;

        if (flag) {
                if (update)
                        rec->flags |= FTRACE_FL_ENABLED;
                return FTRACE_UPDATE_MAKE_CALL;
        }

        if (update)
                rec->flags &= ~FTRACE_FL_ENABLED;

        return FTRACE_UPDATE_MAKE_NOP;
}

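/*
 * Summary of the state machine above (illustrative): when enabling
 * (@enable set), a record with a non-zero ref count that is not yet
 * patched yields FTRACE_UPDATE_MAKE_CALL, a patched record with no
 * remaining ref count yields FTRACE_UPDATE_MAKE_NOP, and an unchanged
 * record yields FTRACE_UPDATE_IGNORE. With @enable clear, every
 * patched record yields FTRACE_UPDATE_MAKE_NOP.
 */
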
/**
 * ftrace_update_record - set a record that is now being traced or not
 * @rec: the record to update
 * @enable: set to 1 if the record is being traced, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
        return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record - check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
        return ftrace_check_record(rec, enable, 0);
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ftrace_addr = (unsigned long)FTRACE_ADDR;

        ret = ftrace_update_record(rec, enable);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MAKE_CALL:
                return ftrace_make_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                return ftrace_make_nop(NULL, rec, ftrace_addr);
        }

        return -1; /* unknown ftrace bug */
}

static void ftrace_replace_code(int update)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int failed;

        if (unlikely(ftrace_disabled))
                return;

        do_for_each_ftrace_rec(pg, rec) {
                failed = __ftrace_replace_code(rec, update);
                if (failed) {
                        ftrace_bug(failed, rec->ip);
                        /* Stop processing */
                        return;
                }
        } while_for_each_ftrace_rec();
}

struct ftrace_rec_iter {
        struct ftrace_page      *pg;
        int                     index;
};

/**
1726  * ftrace_rec_iter_start, start up iterating over traced functions
1727  *
1728  * Returns an iterator handle that is used to iterate over all
1729  * the records that represent address locations where functions
1730  * are traced.
1731  *
1732  * May return NULL if no records are available.
1733  */
1734 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1735 {
1736         /*
1737          * We only use a single iterator.
1738          * Protected by the ftrace_lock mutex.
1739          */
1740         static struct ftrace_rec_iter ftrace_rec_iter;
1741         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1742
1743         iter->pg = ftrace_pages_start;
1744         iter->index = 0;
1745
1746         /* Could have empty pages */
1747         while (iter->pg && !iter->pg->index)
1748                 iter->pg = iter->pg->next;
1749
1750         if (!iter->pg)
1751                 return NULL;
1752
1753         return iter;
1754 }
1755
1756 /**
1757  * ftrace_rec_iter_next, get the next record to process.
1758  * @iter: The handle to the iterator.
1759  *
1760  * Returns the next iterator after the given iterator @iter.
1761  */
1762 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1763 {
1764         iter->index++;
1765
1766         if (iter->index >= iter->pg->index) {
1767                 iter->pg = iter->pg->next;
1768                 iter->index = 0;
1769
1770                 /* Could have empty pages */
1771                 while (iter->pg && !iter->pg->index)
1772                         iter->pg = iter->pg->next;
1773         }
1774
1775         if (!iter->pg)
1776                 return NULL;
1777
1778         return iter;
1779 }
1780
1781 /**
1782  * ftrace_rec_iter_record, get the record at the iterator location
1783  * @iter: The current iterator location
1784  *
1785  * Returns the record that the current @iter is at.
1786  */
1787 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1788 {
1789         return &iter->pg->records[iter->index];
1790 }
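/*
 * Putting the three iterator helpers together, an arch that batches its
 * code patching can walk every record like this (illustrative sketch;
 * process_record() is a hypothetical arch helper):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		process_record(rec);
 *	}
 */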
1791
1792 static int
1793 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1794 {
1795         unsigned long ip;
1796         int ret;
1797
1798         ip = rec->ip;
1799
1800         if (unlikely(ftrace_disabled))
1801                 return 0;
1802
1803         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1804         if (ret) {
1805                 ftrace_bug(ret, ip);
1806                 return 0;
1807         }
1808         return 1;
1809 }
1810
1811 /*
1812  * archs can override this function if they must do something
1813  * before the modifying code is performed.
1814  */
1815 int __weak ftrace_arch_code_modify_prepare(void)
1816 {
1817         return 0;
1818 }
1819
1820 /*
1821  * archs can override this function if they must do something
1822  * after the modifying code is performed.
1823  */
1824 int __weak ftrace_arch_code_modify_post_process(void)
1825 {
1826         return 0;
1827 }
1828
1829 static int __ftrace_modify_code(void *data)
1830 {
1831         int *command = data;
1832
1833         if (*command & FTRACE_UPDATE_CALLS)
1834                 ftrace_replace_code(1);
1835         else if (*command & FTRACE_DISABLE_CALLS)
1836                 ftrace_replace_code(0);
1837
1838         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1839                 ftrace_update_ftrace_func(ftrace_trace_function);
1840
1841         if (*command & FTRACE_START_FUNC_RET)
1842                 ftrace_enable_ftrace_graph_caller();
1843         else if (*command & FTRACE_STOP_FUNC_RET)
1844                 ftrace_disable_ftrace_graph_caller();
1845
1846         return 0;
1847 }
1848
1849 /**
1850  * ftrace_run_stop_machine, go back to the stop machine method
1851  * @command: The command to tell ftrace what to do
1852  *
1853  * If an arch needs to fall back to the stop machine method,
1854  * it can call this function.
1855  */
1856 void ftrace_run_stop_machine(int command)
1857 {
1858         stop_machine(__ftrace_modify_code, &command, NULL);
1859 }
1860
1861 /**
1862  * arch_ftrace_update_code, modify the code to trace or not trace
1863  * @command: The command that needs to be done
1864  *
1865  * Archs can override this function if they do not need to
1866  * run stop_machine() to modify code.
1867  */
1868 void __weak arch_ftrace_update_code(int command)
1869 {
1870         ftrace_run_stop_machine(command);
1871 }
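/*
 * A sketch of what an arch override could look like (illustrative only;
 * arch_can_patch_live() and arch_patch_all_sites() are hypothetical
 * arch-specific helpers, not real kernel API):
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		// patch live code with its own serialization scheme
 *		// instead of freezing every CPU, falling back to the
 *		// stop_machine() path when that is not possible
 *		if (!arch_can_patch_live())
 *			ftrace_run_stop_machine(command);
 *		else
 *			arch_patch_all_sites(command);
 *	}
 */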
1872
1873 static void ftrace_run_update_code(int command)
1874 {
1875         int ret;
1876
1877         ret = ftrace_arch_code_modify_prepare();
1878         FTRACE_WARN_ON(ret);
1879         if (ret)
1880                 return;
1881         /*
1882          * Do not call function tracer while we update the code.
1883          * We are in stop machine.
1884          */
1885         function_trace_stop++;
1886
1887         /*
1888          * By default we use stop_machine() to modify the code.
1889          * But archs can do whatever they want as long as it
1890          * is safe. The stop_machine() is the safest, but also
1891          * produces the most overhead.
1892          */
1893         arch_ftrace_update_code(command);
1894
1895 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1896         /*
1897          * For archs that call ftrace_test_stop_func(), we must
1898          * wait till after we update all the function callers
1899          * before we update the callback. This keeps different
1900          * ops that record different functions from corrupting
1901          * each other.
1902          */
1903         __ftrace_trace_function = __ftrace_trace_function_delay;
1904 #endif
1905         function_trace_stop--;
1906
1907         ret = ftrace_arch_code_modify_post_process();
1908         FTRACE_WARN_ON(ret);
1909 }
1910
1911 static ftrace_func_t saved_ftrace_func;
1912 static int ftrace_start_up;
1913 static int global_start_up;
1914
1915 static void ftrace_startup_enable(int command)
1916 {
1917         if (saved_ftrace_func != ftrace_trace_function) {
1918                 saved_ftrace_func = ftrace_trace_function;
1919                 command |= FTRACE_UPDATE_TRACE_FUNC;
1920         }
1921
1922         if (!command || !ftrace_enabled)
1923                 return;
1924
1925         ftrace_run_update_code(command);
1926 }
1927
1928 static int ftrace_startup(struct ftrace_ops *ops, int command)
1929 {
1930         bool hash_enable = true;
1931
1932         if (unlikely(ftrace_disabled))
1933                 return -ENODEV;
1934
1935         ftrace_start_up++;
1936         command |= FTRACE_UPDATE_CALLS;
1937
1938         /* ops marked global share the filter hashes */
1939         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1940                 ops = &global_ops;
1941                 /* Don't update hash if global is already set */
1942                 if (global_start_up)
1943                         hash_enable = false;
1944                 global_start_up++;
1945         }
1946
1947         ops->flags |= FTRACE_OPS_FL_ENABLED;
1948         if (hash_enable)
1949                 ftrace_hash_rec_enable(ops, 1);
1950
1951         ftrace_startup_enable(command);
1952
1953         return 0;
1954 }
1955
1956 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1957 {
1958         bool hash_disable = true;
1959
1960         if (unlikely(ftrace_disabled))
1961                 return;
1962
1963         ftrace_start_up--;
1964         /*
1965          * Just warn in case of an imbalance; no need to kill ftrace, it's not
1966          * critical, but the ftrace_call callers may never be nopped again after
1967          * further ftrace uses.
1968          */
1969         WARN_ON_ONCE(ftrace_start_up < 0);
1970
1971         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1972                 ops = &global_ops;
1973                 global_start_up--;
1974                 WARN_ON_ONCE(global_start_up < 0);
1975                 /* Don't update hash if global still has users */
1976                 if (global_start_up) {
1977                         WARN_ON_ONCE(!ftrace_start_up);
1978                         hash_disable = false;
1979                 }
1980         }
1981
1982         if (hash_disable)
1983                 ftrace_hash_rec_disable(ops, 1);
1984
1985         if (ops != &global_ops || !global_start_up)
1986                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1987
1988         command |= FTRACE_UPDATE_CALLS;
1989
1990         if (saved_ftrace_func != ftrace_trace_function) {
1991                 saved_ftrace_func = ftrace_trace_function;
1992                 command |= FTRACE_UPDATE_TRACE_FUNC;
1993         }
1994
1995         if (!command || !ftrace_enabled)
1996                 return;
1997
1998         ftrace_run_update_code(command);
1999 }
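/*
 * ftrace_startup()/ftrace_shutdown() above are what ultimately back the
 * public register/unregister API. A minimal client looks roughly like
 * this (hypothetical module code, for illustration only):
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		// called for every function that passes the filters
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);	// -> ftrace_startup()
 *	...
 *	unregister_ftrace_function(&my_ops);	// -> ftrace_shutdown()
 */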
2000
2001 static void ftrace_startup_sysctl(void)
2002 {
2003         if (unlikely(ftrace_disabled))
2004                 return;
2005
2006         /* Force update next time */
2007         saved_ftrace_func = NULL;
2008         /* ftrace_start_up is true if we want ftrace running */
2009         if (ftrace_start_up)
2010                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2011 }
2012
2013 static void ftrace_shutdown_sysctl(void)
2014 {
2015         if (unlikely(ftrace_disabled))
2016                 return;
2017
2018         /* ftrace_start_up is true if ftrace is running */
2019         if (ftrace_start_up)
2020                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2021 }
2022
2023 static cycle_t          ftrace_update_time;
2024 static unsigned long    ftrace_update_cnt;
2025 unsigned long           ftrace_update_tot_cnt;
2026
2027 static int ops_traces_mod(struct ftrace_ops *ops)
2028 {
2029         struct ftrace_hash *hash;
2030
2031         hash = ops->filter_hash;
2032         return ftrace_hash_empty(hash);
2033 }
2034
2035 static int ftrace_update_code(struct module *mod)
2036 {
2037         struct ftrace_page *pg;
2038         struct dyn_ftrace *p;
2039         cycle_t start, stop;
2040         unsigned long ref = 0;
2041         int i;
2042
2043         /*
2044          * When adding a module, we need to check if tracers are
2045          * currently enabled and if they are set to trace all functions.
2046          * If they are, we need to enable the module functions as well
2047          * as update the reference counts for those function records.
2048          */
2049         if (mod) {
2050                 struct ftrace_ops *ops;
2051
2052                 for (ops = ftrace_ops_list;
2053                      ops != &ftrace_list_end; ops = ops->next) {
2054                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2055                             ops_traces_mod(ops))
2056                                 ref++;
2057                 }
2058         }
2059
2060         start = ftrace_now(raw_smp_processor_id());
2061         ftrace_update_cnt = 0;
2062
2063         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2064
2065                 for (i = 0; i < pg->index; i++) {
2066                         /* If something went wrong, bail without enabling anything */
2067                         if (unlikely(ftrace_disabled))
2068                                 return -1;
2069
2070                         p = &pg->records[i];
2071                         p->flags = ref;
2072
2073                         /*
2074                          * Do the initial record conversion from mcount jump
2075                          * to the NOP instructions.
2076                          */
2077                         if (!ftrace_code_disable(mod, p))
2078                                 break;
2079
2080                         ftrace_update_cnt++;
2081
2082                         /*
2083                          * If the tracing is enabled, go ahead and enable the record.
2084                          *
2085          * The reason not to enable the record immediately is the
2086          * inherent check of ftrace_make_nop/ftrace_make_call for
2087          * correct previous instructions.  Doing the NOP conversion
2088          * first puts the module into the correct state, thus
2089          * passing the ftrace_make_call check.
2090                          */
2091                         if (ftrace_start_up && ref) {
2092                                 int failed = __ftrace_replace_code(p, 1);
2093                                 if (failed)
2094                                         ftrace_bug(failed, p->ip);
2095                         }
2096                 }
2097         }
2098
2099         ftrace_new_pgs = NULL;
2100
2101         stop = ftrace_now(raw_smp_processor_id());
2102         ftrace_update_time = stop - start;
2103         ftrace_update_tot_cnt += ftrace_update_cnt;
2104
2105         return 0;
2106 }
2107
2108 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2109 {
2110         int order;
2111         int cnt;
2112
2113         if (WARN_ON(!count))
2114                 return -EINVAL;
2115
2116         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2117
2118         /*
2119          * We want to fill as much as possible. No more than a page
2120          * may be empty.
2121          */
2122         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2123                 order--;
2124
2125  again:
2126         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2127
2128         if (!pg->records) {
2129                 /* if we can't allocate this size, try something smaller */
2130                 if (!order)
2131                         return -ENOMEM;
2132                 order >>= 1;
2133                 goto again;
2134         }
2135
2136         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2137         pg->size = cnt;
2138
2139         if (cnt > count)
2140                 cnt = count;
2141
2142         return cnt;
2143 }
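/*
 * A worked example of the sizing above, assuming 4K pages and a
 * 32-byte struct dyn_ftrace (so ENTRIES_PER_PAGE == 128): for
 * count == 20000, DIV_ROUND_UP() gives 157 pages and get_count_order()
 * rounds that up to order 8 (256 pages).  The while loop then shrinks
 * the order while a smaller block would still leave a page or more of
 * entries unused: order 8 holds 32768 entries >= 20000 + 128, so drop
 * to order 7; order 7 holds 16384 entries < 20128, so stop.  Order 7
 * is allocated, 16384 entries are used from it, and the caller loops
 * to place the remaining 3616 entries in a further allocation.
 */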
2144
2145 static struct ftrace_page *
2146 ftrace_allocate_pages(unsigned long num_to_init)
2147 {
2148         struct ftrace_page *start_pg;
2149         struct ftrace_page *pg;
2150         int order;
2151         int cnt;
2152
2153         if (!num_to_init)
2154                 return NULL;
2155
2156         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2157         if (!pg)
2158                 return NULL;
2159
2160         /*
2161          * Try to allocate as much as possible in one contiguous
2162          * location that fills in all of the space. We want to
2163          * waste as little space as possible.
2164          */
2165         for (;;) {
2166                 cnt = ftrace_allocate_records(pg, num_to_init);
2167                 if (cnt < 0)
2168                         goto free_pages;
2169
2170                 num_to_init -= cnt;
2171                 if (!num_to_init)
2172                         break;
2173
2174                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2175                 if (!pg->next)
2176                         goto free_pages;
2177
2178                 pg = pg->next;
2179         }
2180
2181         return start_pg;
2182
2183  free_pages:
2184         while (start_pg) {
2185                 pg = start_pg;
2186                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2187                 free_pages((unsigned long)pg->records, order);
2188                 start_pg = pg->next;
2189                 kfree(pg);
2190         }
2191         pr_info("ftrace: FAILED to allocate memory for functions\n");
2192         return NULL;
2193 }
2194
2195 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2196 {
2197         int cnt;
2198
2199         if (!num_to_init) {
2200                 pr_info("ftrace: No functions to be traced?\n");
2201                 return -1;
2202         }
2203
2204         cnt = num_to_init / ENTRIES_PER_PAGE;
2205         pr_info("ftrace: allocating %ld entries in %d pages\n",
2206                 num_to_init, cnt + 1);
2207
2208         return 0;
2209 }
2210
2211 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2212
2213 struct ftrace_iterator {
2214         loff_t                          pos;
2215         loff_t                          func_pos;
2216         struct ftrace_page              *pg;
2217         struct dyn_ftrace               *func;
2218         struct ftrace_func_probe        *probe;
2219         struct trace_parser             parser;
2220         struct ftrace_hash              *hash;
2221         struct ftrace_ops               *ops;
2222         int                             hidx;
2223         int                             idx;
2224         unsigned                        flags;
2225 };
2226
2227 static void *
2228 t_hash_next(struct seq_file *m, loff_t *pos)
2229 {
2230         struct ftrace_iterator *iter = m->private;
2231         struct hlist_node *hnd = NULL;
2232         struct hlist_head *hhd;
2233
2234         (*pos)++;
2235         iter->pos = *pos;
2236
2237         if (iter->probe)
2238                 hnd = &iter->probe->node;
2239  retry:
2240         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2241                 return NULL;
2242
2243         hhd = &ftrace_func_hash[iter->hidx];
2244
2245         if (hlist_empty(hhd)) {
2246                 iter->hidx++;
2247                 hnd = NULL;
2248                 goto retry;
2249         }
2250
2251         if (!hnd)
2252                 hnd = hhd->first;
2253         else {
2254                 hnd = hnd->next;
2255                 if (!hnd) {
2256                         iter->hidx++;
2257                         goto retry;
2258                 }
2259         }
2260
2261         if (WARN_ON_ONCE(!hnd))
2262                 return NULL;
2263
2264         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2265
2266         return iter;
2267 }
2268
2269 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2270 {
2271         struct ftrace_iterator *iter = m->private;
2272         void *p = NULL;
2273         loff_t l;
2274
2275         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2276                 return NULL;
2277
2278         if (iter->func_pos > *pos)
2279                 return NULL;
2280
2281         iter->hidx = 0;
2282         for (l = 0; l <= (*pos - iter->func_pos); ) {
2283                 p = t_hash_next(m, &l);
2284                 if (!p)
2285                         break;
2286         }
2287         if (!p)
2288                 return NULL;
2289
2290         /* Only set this if we have an item */
2291         iter->flags |= FTRACE_ITER_HASH;
2292
2293         return iter;
2294 }
2295
2296 static int
2297 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2298 {
2299         struct ftrace_func_probe *rec;
2300
2301         rec = iter->probe;
2302         if (WARN_ON_ONCE(!rec))
2303                 return -EIO;
2304
2305         if (rec->ops->print)
2306                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2307
2308         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2309
2310         if (rec->data)
2311                 seq_printf(m, ":%p", rec->data);
2312         seq_putc(m, '\n');
2313
2314         return 0;
2315 }
2316
2317 static void *
2318 t_next(struct seq_file *m, void *v, loff_t *pos)
2319 {
2320         struct ftrace_iterator *iter = m->private;
2321         struct ftrace_ops *ops = iter->ops;
2322         struct dyn_ftrace *rec = NULL;
2323
2324         if (unlikely(ftrace_disabled))
2325                 return NULL;
2326
2327         if (iter->flags & FTRACE_ITER_HASH)
2328                 return t_hash_next(m, pos);
2329
2330         (*pos)++;
2331         iter->pos = iter->func_pos = *pos;
2332
2333         if (iter->flags & FTRACE_ITER_PRINTALL)
2334                 return t_hash_start(m, pos);
2335
2336  retry:
2337         if (iter->idx >= iter->pg->index) {
2338                 if (iter->pg->next) {
2339                         iter->pg = iter->pg->next;
2340                         iter->idx = 0;
2341                         goto retry;
2342                 }
2343         } else {
2344                 rec = &iter->pg->records[iter->idx++];
2345                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2346                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2347
2348                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2349                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2350
2351                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2352                      !(rec->flags & ~FTRACE_FL_MASK))) {
2353
2354                         rec = NULL;
2355                         goto retry;
2356                 }
2357         }
2358
2359         if (!rec)
2360                 return t_hash_start(m, pos);
2361
2362         iter->func = rec;
2363
2364         return iter;
2365 }
2366
2367 static void reset_iter_read(struct ftrace_iterator *iter)
2368 {
2369         iter->pos = 0;
2370         iter->func_pos = 0;
2371         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2372 }
2373
2374 static void *t_start(struct seq_file *m, loff_t *pos)
2375 {
2376         struct ftrace_iterator *iter = m->private;
2377         struct ftrace_ops *ops = iter->ops;
2378         void *p = NULL;
2379         loff_t l;
2380
2381         mutex_lock(&ftrace_lock);
2382
2383         if (unlikely(ftrace_disabled))
2384                 return NULL;
2385
2386         /*
2387          * If an lseek was done, then reset and start from beginning.
2388          */
2389         if (*pos < iter->pos)
2390                 reset_iter_read(iter);
2391
2392         /*
2393          * For set_ftrace_filter reading, if we have the filter
2394          * off, we can short cut and just print out that all
2395          * functions are enabled.
2396          */
2397         if (iter->flags & FTRACE_ITER_FILTER &&
2398             ftrace_hash_empty(ops->filter_hash)) {
2399                 if (*pos > 0)
2400                         return t_hash_start(m, pos);
2401                 iter->flags |= FTRACE_ITER_PRINTALL;
2402                 /* reset in case of seek/pread */
2403                 iter->flags &= ~FTRACE_ITER_HASH;
2404                 return iter;
2405         }
2406
2407         if (iter->flags & FTRACE_ITER_HASH)
2408                 return t_hash_start(m, pos);
2409
2410         /*
2411          * Unfortunately, we need to restart at ftrace_pages_start
2412          * every time we let go of the ftrace_lock mutex. This is because
2413          * those pointers can change without the lock.
2414          */
2415         iter->pg = ftrace_pages_start;
2416         iter->idx = 0;
2417         for (l = 0; l <= *pos; ) {
2418                 p = t_next(m, p, &l);
2419                 if (!p)
2420                         break;
2421         }
2422
2423         if (!p)
2424                 return t_hash_start(m, pos);
2425
2426         return iter;
2427 }
2428
2429 static void t_stop(struct seq_file *m, void *p)
2430 {
2431         mutex_unlock(&ftrace_lock);
2432 }
2433
2434 static int t_show(struct seq_file *m, void *v)
2435 {
2436         struct ftrace_iterator *iter = m->private;
2437         struct dyn_ftrace *rec;
2438
2439         if (iter->flags & FTRACE_ITER_HASH)
2440                 return t_hash_show(m, iter);
2441
2442         if (iter->flags & FTRACE_ITER_PRINTALL) {
2443                 seq_printf(m, "#### all functions enabled ####\n");
2444                 return 0;
2445         }
2446
2447         rec = iter->func;
2448
2449         if (!rec)
2450                 return 0;
2451
2452         seq_printf(m, "%ps", (void *)rec->ip);
2453         if (iter->flags & FTRACE_ITER_ENABLED)
2454                 seq_printf(m, " (%ld)",
2455                            rec->flags & ~FTRACE_FL_MASK);
2456         seq_printf(m, "\n");
2457
2458         return 0;
2459 }
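/*
 * For reference, t_show() output as seen through the debugfs files is
 * one symbol per line; with FTRACE_ITER_ENABLED (the enabled_functions
 * file) the ref count follows in parentheses.  Symbols and counts below
 * are only illustrative:
 *
 *	schedule
 *	vfs_read (1)
 *	vfs_write (2)
 */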
2460
2461 static const struct seq_operations show_ftrace_seq_ops = {
2462         .start = t_start,
2463         .next = t_next,
2464         .stop = t_stop,
2465         .show = t_show,
2466 };
2467
2468 static int
2469 ftrace_avail_open(struct inode *inode, struct file *file)
2470 {
2471         struct ftrace_iterator *iter;
2472         int ret;
2473
2474         if (unlikely(ftrace_disabled))
2475                 return -ENODEV;
2476
2477         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2478         if (!iter)
2479                 return -ENOMEM;
2480
2481         iter->pg = ftrace_pages_start;
2482         iter->ops = &global_ops;
2483
2484         ret = seq_open(file, &show_ftrace_seq_ops);
2485         if (!ret) {
2486                 struct seq_file *m = file->private_data;
2487
2488                 m->private = iter;
2489         } else {
2490                 kfree(iter);
2491         }
2492
2493         return ret;
2494 }
2495
2496 static int
2497 ftrace_enabled_open(struct inode *inode, struct file *file)
2498 {
2499         struct ftrace_iterator *iter;
2500         int ret;
2501
2502         if (unlikely(ftrace_disabled))
2503                 return -ENODEV;
2504
2505         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2506         if (!iter)
2507                 return -ENOMEM;
2508
2509         iter->pg = ftrace_pages_start;
2510         iter->flags = FTRACE_ITER_ENABLED;
2511         iter->ops = &global_ops;
2512
2513         ret = seq_open(file, &show_ftrace_seq_ops);
2514         if (!ret) {
2515                 struct seq_file *m = file->private_data;
2516
2517                 m->private = iter;
2518         } else {
2519                 kfree(iter);
2520         }
2521
2522         return ret;
2523 }
2524
2525 static void ftrace_filter_reset(struct ftrace_hash *hash)
2526 {
2527         mutex_lock(&ftrace_lock);
2528         ftrace_hash_clear(hash);
2529         mutex_unlock(&ftrace_lock);
2530 }
2531
2532 /**
2533  * ftrace_regex_open - initialize function tracer filter files
2534  * @ops: The ftrace_ops that hold the hash filters
2535  * @flag: The type of filter to process
2536  * @inode: The inode, usually passed in to your open routine
2537  * @file: The file, usually passed in to your open routine
2538  *
2539  * ftrace_regex_open() initializes the filter files for the
2540  * @ops. Depending on @flag it may process the filter hash or
2541  * the notrace hash of @ops. With this called from the open
2542  * routine, you can use ftrace_filter_write() for the write
2543  * routine if @flag has FTRACE_ITER_FILTER set, or
2544  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2545  * ftrace_regex_lseek() should be used as the lseek routine, and
2546  * release must call ftrace_regex_release().
2547  */
2548 int
2549 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2550                   struct inode *inode, struct file *file)
2551 {
2552         struct ftrace_iterator *iter;
2553         struct ftrace_hash *hash;
2554         int ret = 0;
2555
2556         if (unlikely(ftrace_disabled))
2557                 return -ENODEV;
2558
2559         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2560         if (!iter)
2561                 return -ENOMEM;
2562
2563         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2564                 kfree(iter);
2565                 return -ENOMEM;
2566         }
2567
2568         if (flag & FTRACE_ITER_NOTRACE)
2569                 hash = ops->notrace_hash;
2570         else
2571                 hash = ops->filter_hash;
2572
2573         iter->ops = ops;
2574         iter->flags = flag;
2575
2576         if (file->f_mode & FMODE_WRITE) {
2577                 mutex_lock(&ftrace_lock);
2578                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2579                 mutex_unlock(&ftrace_lock);
2580
2581                 if (!iter->hash) {
2582                         trace_parser_put(&iter->parser);
2583                         kfree(iter);
2584                         return -ENOMEM;
2585                 }
2586         }
2587
2588         mutex_lock(&ftrace_regex_lock);
2589
2590         if ((file->f_mode & FMODE_WRITE) &&
2591             (file->f_flags & O_TRUNC))
2592                 ftrace_filter_reset(iter->hash);
2593
2594         if (file->f_mode & FMODE_READ) {
2595                 iter->pg = ftrace_pages_start;
2596
2597                 ret = seq_open(file, &show_ftrace_seq_ops);
2598                 if (!ret) {
2599                         struct seq_file *m = file->private_data;
2600                         m->private = iter;
2601                 } else {
2602                         /* Failed */
2603                         free_ftrace_hash(iter->hash);
2604                         trace_parser_put(&iter->parser);
2605                         kfree(iter);
2606                 }
2607         } else
2608                 file->private_data = iter;
2609         mutex_unlock(&ftrace_regex_lock);
2610
2611         return ret;
2612 }
2613
2614 static int
2615 ftrace_filter_open(struct inode *inode, struct file *file)
2616 {
2617         return ftrace_regex_open(&global_ops,
2618                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2619                         inode, file);
2620 }
2621
2622 static int
2623 ftrace_notrace_open(struct inode *inode, struct file *file)
2624 {
2625         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2626                                  inode, file);
2627 }
2628
2629 loff_t
2630 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2631 {
2632         loff_t ret;
2633
2634         if (file->f_mode & FMODE_READ)
2635                 ret = seq_lseek(file, offset, origin);
2636         else
2637                 file->f_pos = ret = 1;
2638
2639         return ret;
2640 }
2641
2642 static int ftrace_match(char *str, char *regex, int len, int type)
2643 {
2644         int matched = 0;
2645         int slen;
2646
2647         switch (type) {
2648         case MATCH_FULL:
2649                 if (strcmp(str, regex) == 0)
2650                         matched = 1;
2651                 break;
2652         case MATCH_FRONT_ONLY:
2653                 if (strncmp(str, regex, len) == 0)
2654                         matched = 1;
2655                 break;
2656         case MATCH_MIDDLE_ONLY:
2657                 if (strstr(str, regex))
2658                         matched = 1;
2659                 break;
2660         case MATCH_END_ONLY:
2661                 slen = strlen(str);
2662                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2663                         matched = 1;
2664                 break;
2665         }
2666
2667         return matched;
2668 }
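/*
 * How the filter_parse_regex() glob types map onto ftrace_match()
 * (illustrative patterns):
 *
 *	"vfs_read"	MATCH_FULL		only "vfs_read"
 *	"vfs_*"		MATCH_FRONT_ONLY	"vfs_read", "vfs_write", ...
 *	"*print*"	MATCH_MIDDLE_ONLY	anything containing "print"
 *	"*_lock"	MATCH_END_ONLY		"_raw_spin_lock", "console_lock", ...
 */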
2669
2670 static int
2671 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2672 {
2673         struct ftrace_func_entry *entry;
2674         int ret = 0;
2675
2676         entry = ftrace_lookup_ip(hash, rec->ip);
2677         if (not) {
2678                 /* Do nothing if it doesn't exist */
2679                 if (!entry)
2680                         return 0;
2681
2682                 free_hash_entry(hash, entry);
2683         } else {
2684                 /* Do nothing if it exists */
2685                 if (entry)
2686                         return 0;
2687
2688                 ret = add_hash_entry(hash, rec->ip);
2689         }
2690         return ret;
2691 }
2692
2693 static int
2694 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2695                     char *regex, int len, int type)
2696 {
2697         char str[KSYM_SYMBOL_LEN];
2698         char *modname;
2699
2700         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2701
2702         if (mod) {
2703                 /* module lookup requires matching the module */
2704                 if (!modname || strcmp(modname, mod))
2705                         return 0;
2706
2707                 /* blank search means to match all funcs in the mod */
2708                 if (!len)
2709                         return 1;
2710         }
2711
2712         return ftrace_match(str, regex, len, type);
2713 }
2714
2715 static int
2716 match_records(struct ftrace_hash *hash, char *buff,
2717               int len, char *mod, int not)
2718 {
2719         unsigned search_len = 0;
2720         struct ftrace_page *pg;
2721         struct dyn_ftrace *rec;
2722         int type = MATCH_FULL;
2723         char *search = buff;
2724         int found = 0;
2725         int ret;
2726
2727         if (len) {
2728                 type = filter_parse_regex(buff, len, &search, &not);
2729                 search_len = strlen(search);
2730         }
2731
2732         mutex_lock(&ftrace_lock);
2733
2734         if (unlikely(ftrace_disabled))
2735                 goto out_unlock;
2736
2737         do_for_each_ftrace_rec(pg, rec) {
2738                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2739                         ret = enter_record(hash, rec, not);
2740                         if (ret < 0) {
2741                                 found = ret;
2742                                 goto out_unlock;
2743                         }
2744                         found = 1;
2745                 }
2746         } while_for_each_ftrace_rec();
2747  out_unlock:
2748         mutex_unlock(&ftrace_lock);
2749
2750         return found;
2751 }
2752
2753 static int
2754 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2755 {
2756         return match_records(hash, buff, len, NULL, 0);
2757 }
2758
2759 static int
2760 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2761 {
2762         int not = 0;
2763
2764         /* blank or '*' mean the same */
2765         if (strcmp(buff, "*") == 0)
2766                 buff[0] = 0;
2767
2768         /* handle the case of 'dont filter this module' */
2769         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2770                 buff[0] = 0;
2771                 not = 1;
2772         }
2773
2774         return match_records(hash, buff, strlen(buff), mod, not);
2775 }
2776
2777 /*
2778  * We register the module command as a template to show others how
2779  * to register a command as well.
2780  */
2781
2782 static int
2783 ftrace_mod_callback(struct ftrace_hash *hash,
2784                     char *func, char *cmd, char *param, int enable)
2785 {
2786         char *mod;
2787         int ret = -EINVAL;
2788
2789         /*
2790          * cmd == 'mod' because we only registered this func
2791          * for the 'mod' ftrace_func_command.
2792          * But if you register one func with multiple commands,
2793          * you can tell which command was used by the cmd
2794          * parameter.
2795          */
2796
2797         /* we must have a module name */
2798         if (!param)
2799                 return ret;
2800
2801         mod = strsep(&param, ":");
2802         if (!strlen(mod))
2803                 return ret;
2804
2805         ret = ftrace_match_module_records(hash, func, mod);
2806         if (!ret)
2807                 ret = -EINVAL;
2808         if (ret < 0)
2809                 return ret;
2810
2811         return 0;
2812 }
2813
2814 static struct ftrace_func_command ftrace_mod_cmd = {
2815         .name                   = "mod",
2816         .func                   = ftrace_mod_callback,
2817 };
2818
2819 static int __init ftrace_mod_cmd_init(void)
2820 {
2821         return register_ftrace_command(&ftrace_mod_cmd);
2822 }
2823 device_initcall(ftrace_mod_cmd_init);
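/*
 * With the "mod" command registered, a write to set_ftrace_filter can
 * scope a glob to a single module; ftrace_process_regex() splits the
 * string into func:cmd:param and dispatches to ftrace_mod_callback().
 * Illustrative usage from userspace:
 *
 *	# echo 'btrfs_*:mod:btrfs' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	# echo '*:mod:ipv6' > /sys/kernel/debug/tracing/set_ftrace_filter
 */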
2824
2825 static void
2826 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2827 {
2828         struct ftrace_func_probe *entry;
2829         struct hlist_head *hhd;
2830         struct hlist_node *n;
2831         unsigned long key;
2832
2833         key = hash_long(ip, FTRACE_HASH_BITS);
2834
2835         hhd = &ftrace_func_hash[key];
2836
2837         if (hlist_empty(hhd))
2838                 return;
2839
2840         /*
2841          * Disable preemption for these calls to prevent an RCU grace
2842          * period. This syncs the hash iteration and freeing of items
2843          * on the hash. rcu_read_lock is too dangerous here.
2844          */
2845         preempt_disable_notrace();
2846         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2847                 if (entry->ip == ip)
2848                         entry->ops->func(ip, parent_ip, &entry->data);
2849         }
2850         preempt_enable_notrace();
2851 }
2852
2853 static struct ftrace_ops trace_probe_ops __read_mostly =
2854 {
2855         .func           = function_trace_probe_call,
2856 };
2857
2858 static int ftrace_probe_registered;
2859
2860 static void __enable_ftrace_function_probe(void)
2861 {
2862         int ret;
2863         int i;
2864
2865         if (ftrace_probe_registered)
2866                 return;
2867
2868         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2869                 struct hlist_head *hhd = &ftrace_func_hash[i];
2870                 if (hhd->first)
2871                         break;
2872         }
2873         /* Nothing registered? */
2874         if (i == FTRACE_FUNC_HASHSIZE)
2875                 return;
2876
2877         ret = __register_ftrace_function(&trace_probe_ops);
2878         if (!ret)
2879                 ret = ftrace_startup(&trace_probe_ops, 0);
2880
2881         ftrace_probe_registered = 1;
2882 }
2883
2884 static void __disable_ftrace_function_probe(void)
2885 {
2886         int ret;
2887         int i;
2888
2889         if (!ftrace_probe_registered)
2890                 return;
2891
2892         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2893                 struct hlist_head *hhd = &ftrace_func_hash[i];
2894                 if (hhd->first)
2895                         return;
2896         }
2897
2898         /* no more funcs left */
2899         ret = __unregister_ftrace_function(&trace_probe_ops);
2900         if (!ret)
2901                 ftrace_shutdown(&trace_probe_ops, 0);
2902
2903         ftrace_probe_registered = 0;
2904 }
2905
2906
2907 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2908 {
2909         struct ftrace_func_probe *entry =
2910                 container_of(rhp, struct ftrace_func_probe, rcu);
2911
2912         if (entry->ops->free)
2913                 entry->ops->free(&entry->data);
2914         kfree(entry);
2915 }
2916
2917
2918 int
2919 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2920                               void *data)
2921 {
2922         struct ftrace_func_probe *entry;
2923         struct ftrace_page *pg;
2924         struct dyn_ftrace *rec;
2925         int type, len, not;
2926         unsigned long key;
2927         int count = 0;
2928         char *search;
2929
2930         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2931         len = strlen(search);
2932
2933         /* we do not support '!' for function probes */
2934         if (WARN_ON(not))
2935                 return -EINVAL;
2936
2937         mutex_lock(&ftrace_lock);
2938
2939         if (unlikely(ftrace_disabled))
2940                 goto out_unlock;
2941
2942         do_for_each_ftrace_rec(pg, rec) {
2943
2944                 if (!ftrace_match_record(rec, NULL, search, len, type))
2945                         continue;
2946
2947                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2948                 if (!entry) {
2949                         /* If we did not process any, then return error */
2950                         if (!count)
2951                                 count = -ENOMEM;
2952                         goto out_unlock;
2953                 }
2954
2955                 count++;
2956
2957                 entry->data = data;
2958
2959                 /*
2960                  * The caller might want to do something special
2961                  * for each function we find. We call the callback
2962                  * to give the caller an opportunity to do so.
2963                  */
2964                 if (ops->callback) {
2965                         if (ops->callback(rec->ip, &entry->data) < 0) {
2966                                 /* caller does not like this func */
2967                                 kfree(entry);
2968                                 continue;
2969                         }
2970                 }
2971
2972                 entry->ops = ops;
2973                 entry->ip = rec->ip;
2974
2975                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2976                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2977
2978         } while_for_each_ftrace_rec();
2979         __enable_ftrace_function_probe();
2980
2981  out_unlock:
2982         mutex_unlock(&ftrace_lock);
2983
2984         return count;
2985 }
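/*
 * A minimal probe client for the API above (hypothetical code, for
 * illustration only; my_probe_func() fires from the function tracer
 * for every matched ip):
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		// *data is the per-entry cookie passed at registration
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 */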
2986
2987 enum {
2988         PROBE_TEST_FUNC         = 1,
2989         PROBE_TEST_DATA         = 2
2990 };
2991
2992 static void
2993 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2994                                   void *data, int flags)
2995 {
2996         struct ftrace_func_probe *entry;
2997         struct hlist_node *n, *tmp;
2998         char str[KSYM_SYMBOL_LEN];
2999         int type = MATCH_FULL;
3000         int i, len = 0;
3001         char *search;
3002
3003         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3004                 glob = NULL;
3005         else if (glob) {
3006                 int not;
3007
3008                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3009                 len = strlen(search);
3010
3011                 /* we do not support '!' for function probes */
3012                 if (WARN_ON(not))
3013                         return;
3014         }
3015
3016         mutex_lock(&ftrace_lock);
3017         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3018                 struct hlist_head *hhd = &ftrace_func_hash[i];
3019
3020                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
3021
3022                         /* break up if statements for readability */
3023                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3024                                 continue;
3025
3026                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3027                                 continue;
3028
3029                         /* do this last, since it is the most expensive */
3030                         if (glob) {
3031                                 kallsyms_lookup(entry->ip, NULL, NULL,
3032                                                 NULL, str);
3033                                 if (!ftrace_match(str, glob, len, type))
3034                                         continue;
3035                         }
3036
3037                         hlist_del(&entry->node);
3038                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
3039                 }
3040         }
3041         __disable_ftrace_function_probe();
3042         mutex_unlock(&ftrace_lock);
3043 }
3044
3045 void
3046 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3047                                 void *data)
3048 {
3049         __unregister_ftrace_function_probe(glob, ops, data,
3050                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3051 }
3052
3053 void
3054 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3055 {
3056         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3057 }
3058
3059 void unregister_ftrace_function_probe_all(char *glob)
3060 {
3061         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3062 }
3063
3064 static LIST_HEAD(ftrace_commands);
3065 static DEFINE_MUTEX(ftrace_cmd_mutex);
3066
3067 int register_ftrace_command(struct ftrace_func_command *cmd)
3068 {
3069         struct ftrace_func_command *p;
3070         int ret = 0;
3071
3072         mutex_lock(&ftrace_cmd_mutex);
3073         list_for_each_entry(p, &ftrace_commands, list) {
3074                 if (strcmp(cmd->name, p->name) == 0) {
3075                         ret = -EBUSY;
3076                         goto out_unlock;
3077                 }
3078         }
3079         list_add(&cmd->list, &ftrace_commands);
3080  out_unlock:
3081         mutex_unlock(&ftrace_cmd_mutex);
3082
3083         return ret;
3084 }
3085
3086 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3087 {
3088         struct ftrace_func_command *p, *n;
3089         int ret = -ENODEV;
3090
3091         mutex_lock(&ftrace_cmd_mutex);
3092         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3093                 if (strcmp(cmd->name, p->name) == 0) {
3094                         ret = 0;
3095                         list_del_init(&p->list);
3096                         goto out_unlock;
3097                 }
3098         }
3099  out_unlock:
3100         mutex_unlock(&ftrace_cmd_mutex);
3101
3102         return ret;
3103 }
3104
3105 static int ftrace_process_regex(struct ftrace_hash *hash,
3106                                 char *buff, int len, int enable)
3107 {
3108         char *func, *command, *next = buff;
3109         struct ftrace_func_command *p;
3110         int ret = -EINVAL;
3111
3112         func = strsep(&next, ":");
3113
3114         if (!next) {
3115                 ret = ftrace_match_records(hash, func, len);
3116                 if (!ret)
3117                         ret = -EINVAL;
3118                 if (ret < 0)
3119                         return ret;
3120                 return 0;
3121         }
3122
3123         /* command found */
3124
3125         command = strsep(&next, ":");
3126
3127         mutex_lock(&ftrace_cmd_mutex);
3128         list_for_each_entry(p, &ftrace_commands, list) {
3129                 if (strcmp(p->name, command) == 0) {
3130                         ret = p->func(hash, func, command, next, enable);
3131                         goto out_unlock;
3132                 }
3133         }
3134  out_unlock:
3135         mutex_unlock(&ftrace_cmd_mutex);
3136
3137         return ret;
3138 }
3139
3140 static ssize_t
3141 ftrace_regex_write(struct file *file, const char __user *ubuf,
3142                    size_t cnt, loff_t *ppos, int enable)
3143 {
3144         struct ftrace_iterator *iter;
3145         struct trace_parser *parser;
3146         ssize_t ret, read;
3147
3148         if (!cnt)
3149                 return 0;
3150
3151         mutex_lock(&ftrace_regex_lock);
3152
3153         ret = -ENODEV;
3154         if (unlikely(ftrace_disabled))
3155                 goto out_unlock;
3156
3157         if (file->f_mode & FMODE_READ) {
3158                 struct seq_file *m = file->private_data;
3159                 iter = m->private;
3160         } else
3161                 iter = file->private_data;
3162
3163         parser = &iter->parser;
3164         read = trace_get_user(parser, ubuf, cnt, ppos);
3165
3166         if (read >= 0 && trace_parser_loaded(parser) &&
3167             !trace_parser_cont(parser)) {
3168                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3169                                            parser->idx, enable);
3170                 trace_parser_clear(parser);
3171                 if (ret)
3172                         goto out_unlock;
3173         }
3174
3175         ret = read;
3176 out_unlock:
3177         mutex_unlock(&ftrace_regex_lock);
3178
3179         return ret;
3180 }
3181
3182 ssize_t
3183 ftrace_filter_write(struct file *file, const char __user *ubuf,
3184                     size_t cnt, loff_t *ppos)
3185 {
3186         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3187 }
3188
3189 ssize_t
3190 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3191                      size_t cnt, loff_t *ppos)
3192 {
3193         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3194 }
3195
3196 static int
3197 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3198                  int reset, int enable)
3199 {
3200         struct ftrace_hash **orig_hash;
3201         struct ftrace_hash *hash;
3202         int ret;
3203
3204         /* All global ops use the global ops filters */
3205         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3206                 ops = &global_ops;
3207
3208         if (unlikely(ftrace_disabled))
3209                 return -ENODEV;
3210
3211         if (enable)
3212                 orig_hash = &ops->filter_hash;
3213         else
3214                 orig_hash = &ops->notrace_hash;
3215
3216         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3217         if (!hash)
3218                 return -ENOMEM;
3219
3220         mutex_lock(&ftrace_regex_lock);
3221         if (reset)
3222                 ftrace_filter_reset(hash);
3223         if (buf && !ftrace_match_records(hash, buf, len)) {
3224                 ret = -EINVAL;
3225                 goto out_regex_unlock;
3226         }
3227
3228         mutex_lock(&ftrace_lock);
3229         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3230         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3231             && ftrace_enabled)
3232                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3233
3234         mutex_unlock(&ftrace_lock);
3235
3236  out_regex_unlock:
3237         mutex_unlock(&ftrace_regex_lock);
3238
3239         free_ftrace_hash(hash);
3240         return ret;
3241 }
3242
3243 /**
3244  * ftrace_set_filter - set a function to filter on in ftrace
3245  * @ops - the ops to set the filter with
3246  * @buf - the string that holds the function filter text.
3247  * @len - the length of the string.
3248  * @reset - non zero to reset all filters before applying this filter.
3249  *
3250  * Filters denote which functions should be enabled when tracing is enabled.
3251  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3252  */
3253 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3254                        int len, int reset)
3255 {
3256         return ftrace_set_regex(ops, buf, len, reset, 1);
3257 }
3258 EXPORT_SYMBOL_GPL(ftrace_set_filter);
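/*
 * Example (hypothetical caller): restrict a private ops to a couple of
 * functions before registering it; the first call resets the filter,
 * the second appends to it:
 *
 *	ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *	ftrace_set_filter(&my_ops, "vfs_write", strlen("vfs_write"), 0);
 *	register_ftrace_function(&my_ops);
 */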
3259
3260 /**
3261  * ftrace_set_notrace - set a function to not trace in ftrace
3262  * @ops - the ops to set the notrace filter with
3263  * @buf - the string that holds the function notrace text.
3264  * @len - the length of the string.
3265  * @reset - non zero to reset all filters before applying this filter.
3266  *
3267  * Notrace Filters denote which functions should not be enabled when tracing
3268  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3269  * for tracing.
3270  */
3271 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3272                         int len, int reset)
3273 {
3274         return ftrace_set_regex(ops, buf, len, reset, 0);
3275 }
3276 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3277 /**
3278  * ftrace_set_global_filter - set a function to filter on with the
3279  *                            global set of tracing functions
3280  * @buf - the string that holds the function filter text.
3281  * @len - the length of the string.
3282  * @reset - non zero to reset all filters before applying this filter.
3283  *
3284  * Filters denote which functions should be enabled when tracing is enabled.
3285  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3286  */
3287 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3288 {
3289         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3290 }
3291 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3292
3293 /**
3294  * ftrace_set_global_notrace - set a function to not trace with the
3295  *                             global set of tracing functions
3296  * @buf - the string that holds the function notrace text.
3297  * @len - the length of the string.
3298  * @reset - non zero to reset all filters before applying this filter.
3299  *
3300  * Notrace Filters denote which functions should not be enabled when tracing
3301  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3302  * for tracing.
3303  */
3304 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3305 {
3306         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3307 }
3308 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3309
3310 /*
3311  * command line interface to allow users to set filters on boot up.
3312  */
3313 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3314 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3315 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3316
3317 static int __init set_ftrace_notrace(char *str)
3318 {
3319         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3320         return 1;
3321 }
3322 __setup("ftrace_notrace=", set_ftrace_notrace);
3323
3324 static int __init set_ftrace_filter(char *str)
3325 {
3326         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3327         return 1;
3328 }
3329 __setup("ftrace_filter=", set_ftrace_filter);
3330
3331 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3332 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3333 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3334
3335 static int __init set_graph_function(char *str)
3336 {
3337         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3338         return 1;
3339 }
3340 __setup("ftrace_graph_filter=", set_graph_function);
3341
3342 static void __init set_ftrace_early_graph(char *buf)
3343 {
3344         int ret;
3345         char *func;
3346
3347         while (buf) {
3348                 func = strsep(&buf, ",");
3349                 /* we allow only one expression at a time */
3350                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3351                                       func);
3352                 if (ret)
3353                         printk(KERN_DEBUG "ftrace: function %s not "
3354                                           "traceable\n", func);
3355         }
3356 }
3357 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3358
3359 void __init
3360 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3361 {
3362         char *func;
3363
3364         while (buf) {
3365                 func = strsep(&buf, ",");
3366                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3367         }
3368 }
3369
3370 static void __init set_ftrace_early_filters(void)
3371 {
3372         if (ftrace_filter_buf[0])
3373                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3374         if (ftrace_notrace_buf[0])
3375                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3376 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3377         if (ftrace_graph_buf[0])
3378                 set_ftrace_early_graph(ftrace_graph_buf);
3379 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3380 }
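/*
 * Boot-time usage of the parameters above (illustrative kernel command
 * line; each comma-separated token is handled as one expression):
 *
 *	ftrace_filter=kmem_cache_*,kfree ftrace_notrace=*spin_lock*
 *	ftrace_graph_filter=do_IRQ
 */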
3381
3382 int ftrace_regex_release(struct inode *inode, struct file *file)
3383 {
3384         struct seq_file *m = (struct seq_file *)file->private_data;
3385         struct ftrace_iterator *iter;
3386         struct ftrace_hash **orig_hash;
3387         struct trace_parser *parser;
3388         int filter_hash;
3389         int ret;
3390
3391         mutex_lock(&ftrace_regex_lock);
3392         if (file->f_mode & FMODE_READ) {
3393                 iter = m->private;
3394
3395                 seq_release(inode, file);
3396         } else
3397                 iter = file->private_data;
3398
3399         parser = &iter->parser;
3400         if (trace_parser_loaded(parser)) {
3401                 parser->buffer[parser->idx] = 0;
3402                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3403         }
3404
3405         trace_parser_put(parser);
3406
3407         if (file->f_mode & FMODE_WRITE) {
3408                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3409
3410                 if (filter_hash)
3411                         orig_hash = &iter->ops->filter_hash;
3412                 else
3413                         orig_hash = &iter->ops->notrace_hash;
3414
3415                 mutex_lock(&ftrace_lock);
3416                 ret = ftrace_hash_move(iter->ops, filter_hash,
3417                                        orig_hash, iter->hash);
3418                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3419                     && ftrace_enabled)
3420                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3421
3422                 mutex_unlock(&ftrace_lock);
3423         }
3424         free_ftrace_hash(iter->hash);
3425         kfree(iter);
3426
3427         mutex_unlock(&ftrace_regex_lock);
3428         return 0;
3429 }
3430
3431 static const struct file_operations ftrace_avail_fops = {
3432         .open = ftrace_avail_open,
3433         .read = seq_read,
3434         .llseek = seq_lseek,
3435         .release = seq_release_private,
3436 };
3437
3438 static const struct file_operations ftrace_enabled_fops = {
3439         .open = ftrace_enabled_open,
3440         .read = seq_read,
3441         .llseek = seq_lseek,
3442         .release = seq_release_private,
3443 };
3444
3445 static const struct file_operations ftrace_filter_fops = {
3446         .open = ftrace_filter_open,
3447         .read = seq_read,
3448         .write = ftrace_filter_write,
3449         .llseek = ftrace_regex_lseek,
3450         .release = ftrace_regex_release,
3451 };
3452
3453 static const struct file_operations ftrace_notrace_fops = {
3454         .open = ftrace_notrace_open,
3455         .read = seq_read,
3456         .write = ftrace_notrace_write,
3457         .llseek = ftrace_regex_lseek,
3458         .release = ftrace_regex_release,
3459 };
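
/*
 * Runtime usage sketch for the files wired up above (paths assume
 * debugfs is mounted at /sys/kernel/debug; the patterns are examples):
 *
 *      cat /sys/kernel/debug/tracing/available_filter_functions
 *      echo 'kmem_cache_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *      echo rcu_read_lock > /sys/kernel/debug/tracing/set_ftrace_notrace
 *
 * Opening with O_TRUNC ('>') resets the hash first; appending ('>>')
 * adds to it.
 */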
3460
3461 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3462
3463 static DEFINE_MUTEX(graph_lock);
3464
3465 int ftrace_graph_count;
3466 int ftrace_graph_filter_enabled;
3467 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3468
3469 static void *
3470 __g_next(struct seq_file *m, loff_t *pos)
3471 {
3472         if (*pos >= ftrace_graph_count)
3473                 return NULL;
3474         return &ftrace_graph_funcs[*pos];
3475 }
3476
3477 static void *
3478 g_next(struct seq_file *m, void *v, loff_t *pos)
3479 {
3480         (*pos)++;
3481         return __g_next(m, pos);
3482 }
3483
3484 static void *g_start(struct seq_file *m, loff_t *pos)
3485 {
3486         mutex_lock(&graph_lock);
3487
3488         /* No filter set; tell g_show to print that all functions are enabled */
3489         if (!ftrace_graph_filter_enabled && !*pos)
3490                 return (void *)1;
3491
3492         return __g_next(m, pos);
3493 }
3494
3495 static void g_stop(struct seq_file *m, void *p)
3496 {
3497         mutex_unlock(&graph_lock);
3498 }
3499
3500 static int g_show(struct seq_file *m, void *v)
3501 {
3502         unsigned long *ptr = v;
3503
3504         if (!ptr)
3505                 return 0;
3506
3507         if (ptr == (unsigned long *)1) {
3508                 seq_puts(m, "#### all functions enabled ####\n");
3509                 return 0;
3510         }
3511
3512         seq_printf(m, "%ps\n", (void *)*ptr);
3513
3514         return 0;
3515 }
3516
3517 static const struct seq_operations ftrace_graph_seq_ops = {
3518         .start = g_start,
3519         .next = g_next,
3520         .stop = g_stop,
3521         .show = g_show,
3522 };
3523
3524 static int
3525 ftrace_graph_open(struct inode *inode, struct file *file)
3526 {
3527         int ret = 0;
3528
3529         if (unlikely(ftrace_disabled))
3530                 return -ENODEV;
3531
3532         mutex_lock(&graph_lock);
3533         if ((file->f_mode & FMODE_WRITE) &&
3534             (file->f_flags & O_TRUNC)) {
3535                 ftrace_graph_filter_enabled = 0;
3536                 ftrace_graph_count = 0;
3537                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3538         }
3539         mutex_unlock(&graph_lock);
3540
3541         if (file->f_mode & FMODE_READ)
3542                 ret = seq_open(file, &ftrace_graph_seq_ops);
3543
3544         return ret;
3545 }
3546
3547 static int
3548 ftrace_graph_release(struct inode *inode, struct file *file)
3549 {
3550         if (file->f_mode & FMODE_READ)
3551                 seq_release(inode, file);
3552         return 0;
3553 }
3554
3555 static int
3556 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3557 {
3558         struct dyn_ftrace *rec;
3559         struct ftrace_page *pg;
3560         int search_len;
3561         int fail = 1;
3562         int type, not;
3563         char *search;
3564         bool exists;
3565         int i;
3566
3567         /* decode regex */
3568         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3569         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3570                 return -EBUSY;
3571
3572         search_len = strlen(search);
3573
3574         mutex_lock(&ftrace_lock);
3575
3576         if (unlikely(ftrace_disabled)) {
3577                 mutex_unlock(&ftrace_lock);
3578                 return -ENODEV;
3579         }
3580
3581         do_for_each_ftrace_rec(pg, rec) {
3582
3583                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3584                         /* check if it is already in the array */
3585                         exists = false;
3586                         for (i = 0; i < *idx; i++) {
3587                                 if (array[i] == rec->ip) {
3588                                         exists = true;
3589                                         break;
3590                                 }
3591                         }
3592
3593                         if (!not) {
3594                                 fail = 0;
3595                                 if (!exists) {
3596                                         array[(*idx)++] = rec->ip;
3597                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3598                                                 goto out;
3599                                 }
3600                         } else {
3601                                 if (exists) {
3602                                         array[i] = array[--(*idx)];
3603                                         array[*idx] = 0;
3604                                         fail = 0;
3605                                 }
3606                         }
3607                 }
3608         } while_for_each_ftrace_rec();
3609 out:
3610         mutex_unlock(&ftrace_lock);
3611
3612         if (fail)
3613                 return -EINVAL;
3614
3615         ftrace_graph_filter_enabled = 1;
3616         return 0;
3617 }
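
/*
 * Accepted write syntax, as decoded by filter_parse_regex() above
 * (one expression per write; __schedule is just an example):
 *
 *      echo __schedule > set_graph_function      add a function
 *      echo '!__schedule' > set_graph_function   remove it again
 *
 * Simple globs ('abc*', '*abc', '*abc*') are recognized as well.
 */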
3618
3619 static ssize_t
3620 ftrace_graph_write(struct file *file, const char __user *ubuf,
3621                    size_t cnt, loff_t *ppos)
3622 {
3623         struct trace_parser parser;
3624         ssize_t read, ret;
3625
3626         if (!cnt)
3627                 return 0;
3628
3629         mutex_lock(&graph_lock);
3630
3631         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3632                 ret = -ENOMEM;
3633                 goto out_unlock;
3634         }
3635
3636         read = trace_get_user(&parser, ubuf, cnt, ppos);
3637
3638         if (read >= 0 && trace_parser_loaded(&parser)) {
3639                 parser.buffer[parser.idx] = 0;
3640
3641                 /* we allow only one expression at a time */
3642                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3643                                         parser.buffer);
3644                 if (ret)
3645                         goto out_free;
3646         }
3647
3648         ret = read;
3649
3650 out_free:
3651         trace_parser_put(&parser);
3652 out_unlock:
3653         mutex_unlock(&graph_lock);
3654
3655         return ret;
3656 }
3657
3658 static const struct file_operations ftrace_graph_fops = {
3659         .open           = ftrace_graph_open,
3660         .read           = seq_read,
3661         .write          = ftrace_graph_write,
3662         .release        = ftrace_graph_release,
3663         .llseek         = seq_lseek,
3664 };
3665 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3666
3667 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3668 {
3669
3670         trace_create_file("available_filter_functions", 0444,
3671                         d_tracer, NULL, &ftrace_avail_fops);
3672
3673         trace_create_file("enabled_functions", 0444,
3674                         d_tracer, NULL, &ftrace_enabled_fops);
3675
3676         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3677                         NULL, &ftrace_filter_fops);
3678
3679         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3680                                     NULL, &ftrace_notrace_fops);
3681
3682 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3683         trace_create_file("set_graph_function", 0444, d_tracer,
3684                                     NULL,
3685                                     &ftrace_graph_fops);
3686 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3687
3688         return 0;
3689 }
3690
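/* Swap callback handed to sort() in ftrace_process_locs() below */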
3691 static void ftrace_swap_recs(void *a, void *b, int size)
3692 {
3693         struct dyn_ftrace *reca = a;
3694         struct dyn_ftrace *recb = b;
3695         struct dyn_ftrace t;
3696
3697         t = *reca;
3698         *reca = *recb;
3699         *recb = t;
3700 }
3701
3702 static int ftrace_process_locs(struct module *mod,
3703                                unsigned long *start,
3704                                unsigned long *end)
3705 {
3706         struct ftrace_page *pg;
3707         unsigned long count;
3708         unsigned long *p;
3709         unsigned long addr;
3710         unsigned long flags = 0; /* Shut up gcc */
3711         int ret = -ENOMEM;
3712
3713         count = end - start;
3714
3715         if (!count)
3716                 return 0;
3717
3718         pg = ftrace_allocate_pages(count);
3719         if (!pg)
3720                 return -ENOMEM;
3721
3722         mutex_lock(&ftrace_lock);
3723
3724         /*
3725          * The core kernel and each module need their own pages, as
3726          * modules will free them when they are removed.
3727          * Force a new page to be allocated for modules.
3728          */
3729         if (!mod) {
3730                 WARN_ON(ftrace_pages || ftrace_pages_start);
3731                 /* First initialization */
3732                 ftrace_pages = ftrace_pages_start = pg;
3733         } else {
3734                 if (!ftrace_pages)
3735                         goto out;
3736
3737                 if (WARN_ON(ftrace_pages->next)) {
3738                         /* Hmm, we have free pages? */
3739                         while (ftrace_pages->next)
3740                                 ftrace_pages = ftrace_pages->next;
3741                 }
3742
3743                 ftrace_pages->next = pg;
3744                 ftrace_pages = pg;
3745         }
3746
3747         p = start;
3748         while (p < end) {
3749                 addr = ftrace_call_adjust(*p++);
3750                 /*
3751                  * Some architecture linkers will pad between
3752                  * the different mcount_loc sections of different
3753                  * object files to satisfy alignments.
3754                  * Skip any NULL pointers.
3755                  */
3756                 if (!addr)
3757                         continue;
3758                 if (!ftrace_record_ip(addr))
3759                         break;
3760         }
3761
3762         /* These new locations need to be initialized */
3763         ftrace_new_pgs = pg;
3764
3765         /* Make each individual set of pages sorted by ips */
3766         for (; pg; pg = pg->next)
3767                 sort(pg->records, pg->index, sizeof(struct dyn_ftrace),
3768                      ftrace_cmp_recs, ftrace_swap_recs);
3769
3770         /*
3771          * We only need to disable interrupts on start up
3772          * because we are modifying code that an interrupt
3773          * may execute, and the modification is not atomic.
3774          * But for modules, nothing runs the code we modify
3775          * until we are finished with it, and there's no
3776          * reason to cause large interrupt latencies while we do it.
3777          */
3778         if (!mod)
3779                 local_irq_save(flags);
3780         ftrace_update_code(mod);
3781         if (!mod)
3782                 local_irq_restore(flags);
3783         ret = 0;
3784  out:
3785         mutex_unlock(&ftrace_lock);
3786
3787         return ret;
3788 }
3789
3790 #ifdef CONFIG_MODULES
3791
3792 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3793
3794 void ftrace_release_mod(struct module *mod)
3795 {
3796         struct dyn_ftrace *rec;
3797         struct ftrace_page **last_pg;
3798         struct ftrace_page *pg;
3799         int order;
3800
3801         mutex_lock(&ftrace_lock);
3802
3803         if (ftrace_disabled)
3804                 goto out_unlock;
3805
3806         /*
3807          * Each module has its own ftrace_pages, remove
3808          * them from the list.
3809          */
3810         last_pg = &ftrace_pages_start;
3811         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
3812                 rec = &pg->records[0];
3813                 if (within_module_core(rec->ip, mod)) {
3814                         /*
3815                          * As core pages are first, the first
3816                          * page should never be a module page.
3817                          */
3818                         if (WARN_ON(pg == ftrace_pages_start))
3819                                 goto out_unlock;
3820
3821                         /* Check if we are deleting the last page */
3822                         if (pg == ftrace_pages)
3823                                 ftrace_pages = next_to_ftrace_page(last_pg);
3824
3825                         *last_pg = pg->next;
3826                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3827                         free_pages((unsigned long)pg->records, order);
3828                         kfree(pg);
3829                 } else
3830                         last_pg = &pg->next;
3831         }
3832  out_unlock:
3833         mutex_unlock(&ftrace_lock);
3834 }
3835
3836 static void ftrace_init_module(struct module *mod,
3837                                unsigned long *start, unsigned long *end)
3838 {
3839         if (ftrace_disabled || start == end)
3840                 return;
3841         ftrace_process_locs(mod, start, end);
3842 }
3843
3844 static int ftrace_module_notify(struct notifier_block *self,
3845                                 unsigned long val, void *data)
3846 {
3847         struct module *mod = data;
3848
3849         switch (val) {
3850         case MODULE_STATE_COMING:
3851                 ftrace_init_module(mod, mod->ftrace_callsites,
3852                                    mod->ftrace_callsites +
3853                                    mod->num_ftrace_callsites);
3854                 break;
3855         case MODULE_STATE_GOING:
3856                 ftrace_release_mod(mod);
3857                 break;
3858         }
3859
3860         return 0;
3861 }
3862 #else
3863 static int ftrace_module_notify(struct notifier_block *self,
3864                                 unsigned long val, void *data)
3865 {
3866         return 0;
3867 }
3868 #endif /* CONFIG_MODULES */
3869
3870 struct notifier_block ftrace_module_nb = {
3871         .notifier_call = ftrace_module_notify,
3872         .priority = 0,
3873 };
3874
3875 extern unsigned long __start_mcount_loc[];
3876 extern unsigned long __stop_mcount_loc[];
3877
3878 void __init ftrace_init(void)
3879 {
3880         unsigned long count, addr, flags;
3881         int ret;
3882
3883         /* Keep the ftrace pointer to the stub */
3884         addr = (unsigned long)ftrace_stub;
3885
3886         local_irq_save(flags);
3887         ftrace_dyn_arch_init(&addr);
3888         local_irq_restore(flags);
3889
3890         /* ftrace_dyn_arch_init places the return code in addr */
3891         if (addr)
3892                 goto failed;
3893
3894         count = __stop_mcount_loc - __start_mcount_loc;
3895
3896         ret = ftrace_dyn_table_alloc(count);
3897         if (ret)
3898                 goto failed;
3899
3900         last_ftrace_enabled = ftrace_enabled = 1;
3901
3902         ret = ftrace_process_locs(NULL,
3903                                   __start_mcount_loc,
3904                                   __stop_mcount_loc);
3905
3906         ret = register_module_notifier(&ftrace_module_nb);
3907         if (ret)
3908                 pr_warning("Failed to register ftrace module notifier\n");
3909
3910         set_ftrace_early_filters();
3911
3912         return;
3913  failed:
3914         ftrace_disabled = 1;
3915 }
3916
3917 #else
3918
3919 static struct ftrace_ops global_ops = {
3920         .func                   = ftrace_stub,
3921 };
3922
3923 static int __init ftrace_nodyn_init(void)
3924 {
3925         ftrace_enabled = 1;
3926         return 0;
3927 }
3928 device_initcall(ftrace_nodyn_init);
3929
3930 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3931 static inline void ftrace_startup_enable(int command) { }
3932 /* Keep as macros so we do not need to define the commands */
3933 # define ftrace_startup(ops, command)                   \
3934         ({                                              \
3935                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
3936                 0;                                      \
3937         })
3938 # define ftrace_shutdown(ops, command)  do { } while (0)
3939 # define ftrace_startup_sysctl()        do { } while (0)
3940 # define ftrace_shutdown_sysctl()       do { } while (0)
3941
3942 static inline int
3943 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3944 {
3945         return 1;
3946 }
3947
3948 #endif /* CONFIG_DYNAMIC_FTRACE */
3949
3950 static void
3951 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
3952 {
3953         struct ftrace_ops *op;
3954
3955         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
3956                 return;
3957
3958         /*
3959          * Some of the ops may be dynamically allocated,
3960          * they must be freed after a synchronize_sched().
3961          */
3962         preempt_disable_notrace();
3963         trace_recursion_set(TRACE_CONTROL_BIT);
3964         op = rcu_dereference_raw(ftrace_control_list);
3965         while (op != &ftrace_list_end) {
3966                 if (!ftrace_function_local_disabled(op) &&
3967                     ftrace_ops_test(op, ip))
3968                         op->func(ip, parent_ip);
3969
3970                 op = rcu_dereference_raw(op->next);
3971         }
3972         trace_recursion_clear(TRACE_CONTROL_BIT);
3973         preempt_enable_notrace();
3974 }
3975
3976 static struct ftrace_ops control_ops = {
3977         .func = ftrace_ops_control_func,
3978 };
3979
3980 static void
3981 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3982 {
3983         struct ftrace_ops *op;
3984
3985         if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3986                 return;
3987
3988         trace_recursion_set(TRACE_INTERNAL_BIT);
3989         /*
3990          * Some of the ops may be dynamically allocated,
3991          * they must be freed after a synchronize_sched().
3992          */
3993         preempt_disable_notrace();
3994         op = rcu_dereference_raw(ftrace_ops_list);
3995         while (op != &ftrace_list_end) {
3996                 if (ftrace_ops_test(op, ip))
3997                         op->func(ip, parent_ip);
3998                 op = rcu_dereference_raw(op->next);
3999         }
4000         preempt_enable_notrace();
4001         trace_recursion_clear(TRACE_INTERNAL_BIT);
4002 }
4003
4004 static void clear_ftrace_swapper(void)
4005 {
4006         struct task_struct *p;
4007         int cpu;
4008
4009         get_online_cpus();
4010         for_each_online_cpu(cpu) {
4011                 p = idle_task(cpu);
4012                 clear_tsk_trace_trace(p);
4013         }
4014         put_online_cpus();
4015 }
4016
4017 static void set_ftrace_swapper(void)
4018 {
4019         struct task_struct *p;
4020         int cpu;
4021
4022         get_online_cpus();
4023         for_each_online_cpu(cpu) {
4024                 p = idle_task(cpu);
4025                 set_tsk_trace_trace(p);
4026         }
4027         put_online_cpus();
4028 }
4029
4030 static void clear_ftrace_pid(struct pid *pid)
4031 {
4032         struct task_struct *p;
4033
4034         rcu_read_lock();
4035         do_each_pid_task(pid, PIDTYPE_PID, p) {
4036                 clear_tsk_trace_trace(p);
4037         } while_each_pid_task(pid, PIDTYPE_PID, p);
4038         rcu_read_unlock();
4039
4040         put_pid(pid);
4041 }
4042
4043 static void set_ftrace_pid(struct pid *pid)
4044 {
4045         struct task_struct *p;
4046
4047         rcu_read_lock();
4048         do_each_pid_task(pid, PIDTYPE_PID, p) {
4049                 set_tsk_trace_trace(p);
4050         } while_each_pid_task(pid, PIDTYPE_PID, p);
4051         rcu_read_unlock();
4052 }
4053
4054 static void clear_ftrace_pid_task(struct pid *pid)
4055 {
4056         if (pid == ftrace_swapper_pid)
4057                 clear_ftrace_swapper();
4058         else
4059                 clear_ftrace_pid(pid);
4060 }
4061
4062 static void set_ftrace_pid_task(struct pid *pid)
4063 {
4064         if (pid == ftrace_swapper_pid)
4065                 set_ftrace_swapper();
4066         else
4067                 set_ftrace_pid(pid);
4068 }
4069
4070 static int ftrace_pid_add(int p)
4071 {
4072         struct pid *pid;
4073         struct ftrace_pid *fpid;
4074         int ret = -EINVAL;
4075
4076         mutex_lock(&ftrace_lock);
4077
4078         if (!p)
4079                 pid = ftrace_swapper_pid;
4080         else
4081                 pid = find_get_pid(p);
4082
4083         if (!pid)
4084                 goto out;
4085
4086         ret = 0;
4087
4088         list_for_each_entry(fpid, &ftrace_pids, list)
4089                 if (fpid->pid == pid)
4090                         goto out_put;
4091
4092         ret = -ENOMEM;
4093
4094         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4095         if (!fpid)
4096                 goto out_put;
4097
4098         list_add(&fpid->list, &ftrace_pids);
4099         fpid->pid = pid;
4100
4101         set_ftrace_pid_task(pid);
4102
4103         ftrace_update_pid_func();
4104         ftrace_startup_enable(0);
4105
4106         mutex_unlock(&ftrace_lock);
4107         return 0;
4108
4109 out_put:
4110         if (pid != ftrace_swapper_pid)
4111                 put_pid(pid);
4112
4113 out:
4114         mutex_unlock(&ftrace_lock);
4115         return ret;
4116 }
4117
4118 static void ftrace_pid_reset(void)
4119 {
4120         struct ftrace_pid *fpid, *safe;
4121
4122         mutex_lock(&ftrace_lock);
4123         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4124                 struct pid *pid = fpid->pid;
4125
4126                 clear_ftrace_pid_task(pid);
4127
4128                 list_del(&fpid->list);
4129                 kfree(fpid);
4130         }
4131
4132         ftrace_update_pid_func();
4133         ftrace_startup_enable(0);
4134
4135         mutex_unlock(&ftrace_lock);
4136 }
4137
4138 static void *fpid_start(struct seq_file *m, loff_t *pos)
4139 {
4140         mutex_lock(&ftrace_lock);
4141
4142         if (list_empty(&ftrace_pids) && (!*pos))
4143                 return (void *) 1;
4144
4145         return seq_list_start(&ftrace_pids, *pos);
4146 }
4147
4148 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4149 {
4150         if (v == (void *)1)
4151                 return NULL;
4152
4153         return seq_list_next(v, &ftrace_pids, pos);
4154 }
4155
4156 static void fpid_stop(struct seq_file *m, void *p)
4157 {
4158         mutex_unlock(&ftrace_lock);
4159 }
4160
4161 static int fpid_show(struct seq_file *m, void *v)
4162 {
4163         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4164
4165         if (v == (void *)1) {
4166                 seq_printf(m, "no pid\n");
4167                 return 0;
4168         }
4169
4170         if (fpid->pid == ftrace_swapper_pid)
4171                 seq_printf(m, "swapper tasks\n");
4172         else
4173                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4174
4175         return 0;
4176 }
4177
4178 static const struct seq_operations ftrace_pid_sops = {
4179         .start = fpid_start,
4180         .next = fpid_next,
4181         .stop = fpid_stop,
4182         .show = fpid_show,
4183 };
4184
4185 static int
4186 ftrace_pid_open(struct inode *inode, struct file *file)
4187 {
4188         int ret = 0;
4189
4190         if ((file->f_mode & FMODE_WRITE) &&
4191             (file->f_flags & O_TRUNC))
4192                 ftrace_pid_reset();
4193
4194         if (file->f_mode & FMODE_READ)
4195                 ret = seq_open(file, &ftrace_pid_sops);
4196
4197         return ret;
4198 }
4199
4200 static ssize_t
4201 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4202                    size_t cnt, loff_t *ppos)
4203 {
4204         char buf[64], *tmp;
4205         long val;
4206         int ret;
4207
4208         if (cnt >= sizeof(buf))
4209                 return -EINVAL;
4210
4211         if (copy_from_user(&buf, ubuf, cnt))
4212                 return -EFAULT;
4213
4214         buf[cnt] = 0;
4215
4216         /*
4217          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4218          * to clear the filter quietly.
4219          */
4220         tmp = strstrip(buf);
4221         if (strlen(tmp) == 0)
4222                 return 1;
4223
4224         ret = strict_strtol(tmp, 10, &val);
4225         if (ret < 0)
4226                 return ret;
4227
4228         ret = ftrace_pid_add(val);
4229
4230         return ret ? ret : cnt;
4231 }
4232
4233 static int
4234 ftrace_pid_release(struct inode *inode, struct file *file)
4235 {
4236         if (file->f_mode & FMODE_READ)
4237                 seq_release(inode, file);
4238
4239         return 0;
4240 }
4241
4242 static const struct file_operations ftrace_pid_fops = {
4243         .open           = ftrace_pid_open,
4244         .write          = ftrace_pid_write,
4245         .read           = seq_read,
4246         .llseek         = seq_lseek,
4247         .release        = ftrace_pid_release,
4248 };
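
/*
 * Usage sketch for the set_ftrace_pid file backed by the fops above
 * (the PID value is an example):
 *
 *      echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *      echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid       (idle tasks)
 *      echo > /sys/kernel/debug/tracing/set_ftrace_pid         (clear)
 *
 * A write of 0 maps to ftrace_swapper_pid in ftrace_pid_add().
 */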
4249
4250 static __init int ftrace_init_debugfs(void)
4251 {
4252         struct dentry *d_tracer;
4253
4254         d_tracer = tracing_init_dentry();
4255         if (!d_tracer)
4256                 return 0;
4257
4258         ftrace_init_dyn_debugfs(d_tracer);
4259
4260         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4261                             NULL, &ftrace_pid_fops);
4262
4263         ftrace_profile_debugfs(d_tracer);
4264
4265         return 0;
4266 }
4267 fs_initcall(ftrace_init_debugfs);
4268
4269 /**
4270  * ftrace_kill - kill ftrace
4271  *
4272  * This function should be used by panic code. It stops ftrace
4273  * but in a not so nice way. Since it takes no locks, it is
4274  * safe to call from atomic sections as well.
4275  */
4276 void ftrace_kill(void)
4277 {
4278         ftrace_disabled = 1;
4279         ftrace_enabled = 0;
4280         clear_ftrace_function();
4281 }
4282
4283 /**
4284  * ftrace_is_dead - Test if ftrace is dead or not.
4285  */
4286 int ftrace_is_dead(void)
4287 {
4288         return ftrace_disabled;
4289 }
4290
4291 /**
4292  * register_ftrace_function - register a function for profiling
4293  * @ops: ops structure that holds the function for profiling.
4294  *
4295  * Register a function to be called by all functions in the
4296  * kernel.
4297  *
4298  * Note: @ops->func and all the functions it calls must be labeled
4299  *       with "notrace", otherwise it will go into a
4300  *       recursive loop.
4301  */
4302 int register_ftrace_function(struct ftrace_ops *ops)
4303 {
4304         int ret = -1;
4305
4306         mutex_lock(&ftrace_lock);
4307
4308         if (unlikely(ftrace_disabled))
4309                 goto out_unlock;
4310
4311         ret = __register_ftrace_function(ops);
4312         if (!ret)
4313                 ret = ftrace_startup(ops, 0);
4314
4315
4316  out_unlock:
4317         mutex_unlock(&ftrace_lock);
4318         return ret;
4319 }
4320 EXPORT_SYMBOL_GPL(register_ftrace_function);
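
/*
 * Minimal registration sketch (illustrative caller code; my_func and
 * my_ops are not part of this file). The callback must be notrace, as
 * the comment above warns; ip is the address of the traced function
 * and parent_ip that of its caller:
 *
 *      static void notrace my_func(unsigned long ip, unsigned long parent_ip)
 *      {
 *      }
 *
 *      static struct ftrace_ops my_ops = {
 *              .func = my_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */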
4321
4322 /**
4323  * unregister_ftrace_function - unregister a function for profiling.
4324  * @ops: ops structure that holds the function to unregister
4325  *
4326  * Unregister a function that was added to be called by ftrace profiling.
4327  */
4328 int unregister_ftrace_function(struct ftrace_ops *ops)
4329 {
4330         int ret;
4331
4332         mutex_lock(&ftrace_lock);
4333         ret = __unregister_ftrace_function(ops);
4334         if (!ret)
4335                 ftrace_shutdown(ops, 0);
4336         mutex_unlock(&ftrace_lock);
4337
4338         return ret;
4339 }
4340 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4341
4342 int
4343 ftrace_enable_sysctl(struct ctl_table *table, int write,
4344                      void __user *buffer, size_t *lenp,
4345                      loff_t *ppos)
4346 {
4347         int ret = -ENODEV;
4348
4349         mutex_lock(&ftrace_lock);
4350
4351         if (unlikely(ftrace_disabled))
4352                 goto out;
4353
4354         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4355
4356         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4357                 goto out;
4358
4359         last_ftrace_enabled = !!ftrace_enabled;
4360
4361         if (ftrace_enabled) {
4362
4363                 ftrace_startup_sysctl();
4364
4365                 /* we are starting ftrace again */
4366                 if (ftrace_ops_list != &ftrace_list_end) {
4367                         if (ftrace_ops_list->next == &ftrace_list_end)
4368                                 ftrace_trace_function = ftrace_ops_list->func;
4369                         else
4370                                 ftrace_trace_function = ftrace_ops_list_func;
4371                 }
4372
4373         } else {
4374                 /* stopping ftrace calls (just send to ftrace_stub) */
4375                 ftrace_trace_function = ftrace_stub;
4376
4377                 ftrace_shutdown_sysctl();
4378         }
4379
4380  out:
4381         mutex_unlock(&ftrace_lock);
4382         return ret;
4383 }
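
/*
 * The handler above backs the kernel.ftrace_enabled sysctl, e.g.:
 *
 *      echo 0 > /proc/sys/kernel/ftrace_enabled
 *      sysctl kernel.ftrace_enabled=1
 */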
4384
4385 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4386
4387 static int ftrace_graph_active;
4388 static struct notifier_block ftrace_suspend_notifier;
4389
4390 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4391 {
4392         return 0;
4393 }
4394
4395 /* The callbacks that hook a function */
4396 trace_func_graph_ret_t ftrace_graph_return =
4397                         (trace_func_graph_ret_t)ftrace_stub;
4398 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4399
4400 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
4401 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4402 {
4403         int i;
4404         int ret = 0;
4405         unsigned long flags;
4406         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4407         struct task_struct *g, *t;
4408
4409         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4410                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4411                                         * sizeof(struct ftrace_ret_stack),
4412                                         GFP_KERNEL);
4413                 if (!ret_stack_list[i]) {
4414                         start = 0;
4415                         end = i;
4416                         ret = -ENOMEM;
4417                         goto free;
4418                 }
4419         }
4420
4421         read_lock_irqsave(&tasklist_lock, flags);
4422         do_each_thread(g, t) {
4423                 if (start == end) {
4424                         ret = -EAGAIN;
4425                         goto unlock;
4426                 }
4427
4428                 if (t->ret_stack == NULL) {
4429                         atomic_set(&t->tracing_graph_pause, 0);
4430                         atomic_set(&t->trace_overrun, 0);
4431                         t->curr_ret_stack = -1;
4432                         /* Make sure the tasks see the -1 first: */
4433                         smp_wmb();
4434                         t->ret_stack = ret_stack_list[start++];
4435                 }
4436         } while_each_thread(g, t);
4437
4438 unlock:
4439         read_unlock_irqrestore(&tasklist_lock, flags);
4440 free:
4441         for (i = start; i < end; i++)
4442                 kfree(ret_stack_list[i]);
4443         return ret;
4444 }
4445
4446 static void
4447 ftrace_graph_probe_sched_switch(void *ignore,
4448                         struct task_struct *prev, struct task_struct *next)
4449 {
4450         unsigned long long timestamp;
4451         int index;
4452
4453         /*
4454          * Does the user want to count the time a function was asleep?
4455          * If so, do not update the time stamps.
4456          */
4457         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4458                 return;
4459
4460         timestamp = trace_clock_local();
4461
4462         prev->ftrace_timestamp = timestamp;
4463
4464         /* only process tasks that we timestamped */
4465         if (!next->ftrace_timestamp)
4466                 return;
4467
4468         /*
4469          * Update all the counters in next to make up for the
4470          * time next was sleeping.
4471          */
4472         timestamp -= next->ftrace_timestamp;
4473
4474         for (index = next->curr_ret_stack; index >= 0; index--)
4475                 next->ret_stack[index].calltime += timestamp;
4476 }
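
/*
 * Worked example for the loop above: if next slept for 3 ms, every
 * entry still pending on next->ret_stack has its calltime pushed
 * forward by 3 ms, so the duration later computed as
 * rettime - calltime excludes the time spent asleep.
 */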
4477
4478 /* Allocate a return stack for each task */
4479 static int start_graph_tracing(void)
4480 {
4481         struct ftrace_ret_stack **ret_stack_list;
4482         int ret, cpu;
4483
4484         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4485                                 sizeof(struct ftrace_ret_stack *),
4486                                 GFP_KERNEL);
4487
4488         if (!ret_stack_list)
4489                 return -ENOMEM;
4490
4491         /* A ret_stack handed to an idle task here is never freed */
4492         for_each_online_cpu(cpu) {
4493                 if (!idle_task(cpu)->ret_stack)
4494                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4495         }
4496
4497         do {
4498                 ret = alloc_retstack_tasklist(ret_stack_list);
4499         } while (ret == -EAGAIN);
4500
4501         if (!ret) {
4502                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4503                 if (ret)
4504                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4505                                 " probe to kernel_sched_switch\n");
4506         }
4507
4508         kfree(ret_stack_list);
4509         return ret;
4510 }
4511
4512 /*
4513  * Hibernation protection.
4514  * The state of the current task is too unstable during
4515  * suspend/restore to disk. We want to protect against that.
4516  */
4517 static int
4518 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4519                                                         void *unused)
4520 {
4521         switch (state) {
4522         case PM_HIBERNATION_PREPARE:
4523                 pause_graph_tracing();
4524                 break;
4525
4526         case PM_POST_HIBERNATION:
4527                 unpause_graph_tracing();
4528                 break;
4529         }
4530         return NOTIFY_DONE;
4531 }
4532
4533 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4534                         trace_func_graph_ent_t entryfunc)
4535 {
4536         int ret = 0;
4537
4538         mutex_lock(&ftrace_lock);
4539
4540         /* we currently allow only one tracer registered at a time */
4541         if (ftrace_graph_active) {
4542                 ret = -EBUSY;
4543                 goto out;
4544         }
4545
4546         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4547         register_pm_notifier(&ftrace_suspend_notifier);
4548
4549         ftrace_graph_active++;
4550         ret = start_graph_tracing();
4551         if (ret) {
4552                 ftrace_graph_active--;
4553                 goto out;
4554         }
4555
4556         ftrace_graph_return = retfunc;
4557         ftrace_graph_entry = entryfunc;
4558
4559         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4560
4561 out:
4562         mutex_unlock(&ftrace_lock);
4563         return ret;
4564 }
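
/*
 * Registration sketch (illustrative; my_entry and my_return are not
 * part of this file). A nonzero return from the entry callback means
 * "trace this function":
 *
 *      static int my_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return 1;
 *      }
 *
 *      static void my_return(struct ftrace_graph_ret *trace)
 *      {
 *      }
 *
 *      register_ftrace_graph(my_return, my_entry);
 */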
4565
4566 void unregister_ftrace_graph(void)
4567 {
4568         mutex_lock(&ftrace_lock);
4569
4570         if (unlikely(!ftrace_graph_active))
4571                 goto out;
4572
4573         ftrace_graph_active--;
4574         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4575         ftrace_graph_entry = ftrace_graph_entry_stub;
4576         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4577         unregister_pm_notifier(&ftrace_suspend_notifier);
4578         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4579
4580  out:
4581         mutex_unlock(&ftrace_lock);
4582 }
4583
4584 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4585
4586 static void
4587 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4588 {
4589         atomic_set(&t->tracing_graph_pause, 0);
4590         atomic_set(&t->trace_overrun, 0);
4591         t->ftrace_timestamp = 0;
4592         /* make curr_ret_stack visible before we add the ret_stack */
4593         smp_wmb();
4594         t->ret_stack = ret_stack;
4595 }
4596
4597 /*
4598  * Allocate a return stack for the idle task. May be the first
4599  * time through, or it may be done by CPU hotplug online.
4600  */
4601 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4602 {
4603         t->curr_ret_stack = -1;
4604         /*
4605          * The idle task has no parent; it either has its own
4606          * stack or no stack at all.
4607          */
4608         if (t->ret_stack)
4609                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4610
4611         if (ftrace_graph_active) {
4612                 struct ftrace_ret_stack *ret_stack;
4613
4614                 ret_stack = per_cpu(idle_ret_stack, cpu);
4615                 if (!ret_stack) {
4616                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4617                                             * sizeof(struct ftrace_ret_stack),
4618                                             GFP_KERNEL);
4619                         if (!ret_stack)
4620                                 return;
4621                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4622                 }
4623                 graph_init_task(t, ret_stack);
4624         }
4625 }
4626
4627 /* Allocate a return stack for newly created task */
4628 void ftrace_graph_init_task(struct task_struct *t)
4629 {
4630         /* Make sure we do not use the parent ret_stack */
4631         t->ret_stack = NULL;
4632         t->curr_ret_stack = -1;
4633
4634         if (ftrace_graph_active) {
4635                 struct ftrace_ret_stack *ret_stack;
4636
4637                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4638                                 * sizeof(struct ftrace_ret_stack),
4639                                 GFP_KERNEL);
4640                 if (!ret_stack)
4641                         return;
4642                 graph_init_task(t, ret_stack);
4643         }
4644 }
4645
4646 void ftrace_graph_exit_task(struct task_struct *t)
4647 {
4648         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4649
4650         t->ret_stack = NULL;
4651         /* NULL must become visible to IRQs before we free it: */
4652         barrier();
4653
4654         kfree(ret_stack);
4655 }
4656
4657 void ftrace_graph_stop(void)
4658 {
4659         ftrace_stop();
4660 }
4661 #endif