/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
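
/*
 * Background sketch (illustrative, not part of this file): with -pg,
 * gcc emits a call to mcount() at the entry of every instrumented
 * function.  On x86, for example, the compiler output looks roughly
 * like:
 *
 *      my_func:
 *              ...prologue...
 *              call mcount
 *              ...function body...
 *
 * The exact call sequence is architecture specific and my_func is a
 * hypothetical example.  Dynamic ftrace records these call sites and
 * patches them to nops or to calls to ftrace_caller() as tracing is
 * toggled.
 */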

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before other CPUs see the
 * change and stop calling into the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should never be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};
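
/*
 * These are bitmask values and may be combined into one command, e.g.
 * ftrace_force_shutdown() below passes
 * FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC.
 */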

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
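
/*
 * For a feel of the sizes involved (illustrative only; both sizes are
 * config and arch dependent): with 4096-byte pages, a 16-byte header
 * and a 32-byte struct dyn_ftrace this comes to
 * (4096 - 16) / 32 = 127 entries per page.
 */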

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

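        /*
         * Freed records form a singly linked list threaded through
         * their ip fields: ip holds the next free record, and the
         * FTRACE_FL_FREE flag marks the entry as recycled (see
         * ftrace_alloc_dyn_node() below, which walks this list).
         */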
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == 0) || (rec->flags & FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl == FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}
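
/*
 * The filtered-enable cases handled above, as a table (an informal
 * summary of the flag checks, not additional behavior):
 *
 *      FILTER  ENABLED action
 *       set     set    nothing to do
 *       set     clear  patch in the call to ftrace_caller (enable)
 *       clear   clear  nothing to do
 *       clear   set    patch the nop back in (disable)
 *
 * Records flagged FTRACE_FL_NOTRACE are always left alone.
 */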

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !kernel_text_address(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
        } else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

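/*
 * Rewriting live kernel text is only safe when no other CPU can be
 * executing the instructions being patched.  stop_machine_run() parks
 * every CPU and then runs __ftrace_modify_code() on one of them.
 */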
static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, &ftrace_hash[i], node) {
                        /* Skip over failed records which have not been
                         * freed. */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /* Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bid for
                         * global warming :-p ). */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !kernel_text_address(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);

        return 1;
}

static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     (!(rec->flags & FTRACE_FL_FAILED) ||
                      (rec->flags & FTRACE_FL_FREE))) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }
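
        /*
         * Pattern forms recognized by the loop above (summary only,
         * no additional behavior):
         *
         *      "func"          MATCH_FULL              exact name
         *      "func*"         MATCH_FRONT_ONLY        prefix match
         *      "*func"         MATCH_END_ONLY          suffix match
         *      "*func*"        MATCH_MIDDLE_ONLY       substring match
         */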

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        /* cnt is a size_t and can never be negative */
        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
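
/*
 * Illustrative use from user space (the debugfs mount point varies by
 * system):
 *
 *      # echo schedule  >  /debug/tracing/set_ftrace_filter
 *      # echo 'sys_*'   >> /debug/tracing/set_ftrace_filter
 *
 * Names are whitespace separated; opening with O_APPEND (">>") skips
 * the filter reset done in ftrace_regex_open().
 */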

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
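
/*
 * For example (hypothetical in-kernel caller), to drop any previous
 * filter and trace only schedule():
 *
 *      ftrace_set_filter("schedule", strlen("schedule"), 1);
 */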

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}
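
/*
 * Illustrative control of the daemon from user space (mount point may
 * differ):
 *
 *      # echo disable > /debug/tracing/ftraced_enabled
 *      # echo 1       > /debug/tracing/ftraced_enabled
 *
 * Both the enable/disable keywords and numeric 0/1 are accepted.
 */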

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc();
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from making any
 * further code modifications or updates. It is used when something
 * has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
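
/*
 * Minimal usage sketch (my_trace_func and my_ops are hypothetical,
 * not part of this file).  The callback runs on entry of every traced
 * function and must itself be notrace:
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip)
 *      {
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 */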

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}