/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;

static int tracing_disabled = 1;

ns2usecs(cycle_t nsec)

notrace cycle_t ftrace_now(int cpu)
{
	return cpu_clock(cpu);
}
static struct trace_array global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int tracer_enabled = 1;
static unsigned long trace_nr_entries = 16384UL;

static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
static int max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

static int __init set_nr_entries(char *str)

	trace_nr_entries = simple_strtoul(str, &str, 0);

__setup("trace_entries=", set_nr_entries);
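/*
 * Illustrative sizing sketch (not part of the original file): with 4 KiB
 * pages, ENTRIES_PER_PAGE is PAGE_SIZE / sizeof(struct trace_entry), so
 * the number of pages needed per CPU for the requested entry count is
 * roughly:
 *
 *	pages_per_cpu = DIV_ROUND_UP(trace_nr_entries, ENTRIES_PER_PAGE);
 *
 * Booting with "trace_entries=65536" on the kernel command line would
 * quadruple the default of 16384; the exact count is rounded up to a
 * whole number of pages by tracer_alloc_buffers() below.
 */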
unsigned long nsecs_to_usecs(unsigned long nsecs)

	__TRACE_FIRST_TYPE = 0,

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF	= 0x01,
	TRACE_FLAG_NEED_RESCHED	= 0x02,
	TRACE_FLAG_HARDIRQ	= 0x04,
	TRACE_FLAG_SOFTIRQ	= 0x08,

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
	TRACE_ITER_RAW		= 0x10,
	TRACE_ITER_BIN		= 0x20,

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {

static unsigned trace_flags;
static DEFINE_SPINLOCK(ftrace_max_lock);

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)

	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
void check_pages(struct trace_array_cpu *data)

	struct page *page, *tmp;

	BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
	BUG_ON(data->trace_pages.prev->next != &data->trace_pages);

	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
		BUG_ON(page->lru.next->prev != &page->lru);
		BUG_ON(page->lru.prev->next != &page->lru);

void *head_page(struct trace_array_cpu *data)

	if (list_empty(&data->trace_pages))

	page = list_entry(data->trace_pages.next, struct page, lru);
	BUG_ON(&page->lru == &data->trace_pages);

	return page_address(page);
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)

	int len = (PAGE_SIZE - 1) - s->len;

	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);

	/* If we can't write it all, don't bother writing anything */

trace_seq_puts(struct trace_seq *s, const char *str)

	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))

	memcpy(s->buffer + s->len, str, len);

trace_seq_putc(struct trace_seq *s, unsigned char c)

	if (s->len >= (PAGE_SIZE - 1))

	s->buffer[s->len++] = c;

trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)

	if (len > ((PAGE_SIZE - 1) - s->len))

	memcpy(s->buffer + s->len, mem, len);

trace_seq_reset(struct trace_seq *s)

trace_print_seq(struct seq_file *m, struct trace_seq *s)

	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	seq_puts(m, s->buffer);
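/*
 * Usage sketch for the trace_seq helpers above (illustrative only): a
 * print pass accumulates one entry's output in iter->seq via
 * trace_seq_printf()/puts()/putc()/putmem(), then hands the page-sized
 * buffer to seq_file in one go:
 *
 *	struct trace_seq *s = &iter->seq;
 *
 *	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 *	trace_seq_puts(s, " (");
 *	trace_print_seq(m, s);		// flush to the seq_file
 *
 * Every writer bails out rather than exceed PAGE_SIZE - 1 bytes, so a
 * partially formatted line is never handed to userspace.
 */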
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)

	struct list_head flip_pages;

	INIT_LIST_HEAD(&flip_pages);

	memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
		sizeof(struct trace_array_cpu) -
		offsetof(struct trace_array_cpu, trace_head_idx));

	list_splice_init(&tr1->trace_pages, &flip_pages);
	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
	list_splice_init(&flip_pages, &tr2->trace_pages);
	BUG_ON(!list_empty(&flip_pages));
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)

	struct trace_array_cpu *data;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		flip_trace(max_tr.data[i], data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
/*
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)

	struct trace_array_cpu *data = tr->data[cpu];

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	flip_trace(max_tr.data[cpu], data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
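/*
 * Usage sketch (illustrative, not from this file): a latency tracer such
 * as irqsoff compares the length of a just-finished critical section
 * against tracing_max_latency and, on a new record, snapshots the live
 * buffer with the helpers above:
 *
 *	if (delta > tracing_max_latency) {
 *		tracing_max_latency = delta;
 *		update_max_tr_single(tr, current, cpu);
 *	}
 *
 * The flip is a page-list swap rather than a copy of the trace data,
 * which keeps the snapshot cheap enough for the hot path.
 */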
int register_tracer(struct tracer *type)

		pr_info("Tracer must have a name\n");

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			pr_info("Tracer %s already registered\n",

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array_cpu *data;
		struct trace_array *tr = &global_trace;
		int saved_ctrl = tr->ctrl;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_possible_cpu(i) {
			if (!head_page(data))

		current_trace = type;

		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		tr->ctrl = saved_ctrl;

			printk(KERN_CONT "FAILED!\n");

		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_possible_cpu(i) {
			if (!head_page(data))

		printk(KERN_CONT "PASSED\n");

	type->next = trace_types;

	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

	mutex_unlock(&trace_types_lock);
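/*
 * Registration sketch (illustrative; the field names follow the struct
 * tracer callbacks used elsewhere in this file and are otherwise an
 * assumption, as is the tracer name):
 *
 *	static void mytracer_init(struct trace_array *tr)
 *	{
 *		// reset the per-CPU buffers, hook callbacks, etc.
 *	}
 *
 *	static struct tracer mytracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= mytracer_init,
 *	};
 *
 *	static __init int init_mytracer(void)
 *	{
 *		return register_tracer(&mytracer);
 *	}
 *	device_initcall(init_mytracer);
 *
 * With CONFIG_FTRACE_STARTUP_TEST, a .selftest callback would also run
 * at registration time, exactly as in the code above.
 */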
void unregister_tracer(struct tracer *type)

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {

	pr_info("Tracer %s not registered\n", type->name);

	if (strlen(type->name) != max_tracer_type_len)

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;

	mutex_unlock(&trace_types_lock);
notrace void tracing_reset(struct trace_array_cpu *data)

	data->trace_head = data->trace_tail = head_page(data);
	data->trace_head_idx = 0;
	data->trace_tail_idx = 0;

function_trace_call(unsigned long ip, unsigned long parent_ip)

	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;

	if (unlikely(!tracer_enabled))

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)

	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
notrace void trace_stop_cmdline_recording(void);

static notrace void trace_save_cmdline(struct task_struct *tsk)

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
static notrace char *trace_find_cmdline(int pid)

	char *cmdline = "<...>";

	if (pid > PID_MAX_DEFAULT)

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)

	cmdline = saved_cmdlines[map];

notrace void tracing_record_cmdline(struct task_struct *tsk)

	if (atomic_read(&trace_record_cmdline_disabled))

	trace_save_cmdline(tsk);
static inline notrace struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)

	/*
	 * Round-robin - but skip the head (which is not a real page):
	 */
	if (unlikely(next == &data->trace_pages))

	BUG_ON(next == &data->trace_pages);

static inline notrace void *
trace_next_page(struct trace_array_cpu *data, void *addr)

	struct list_head *next;

	page = virt_to_page(addr);

	next = trace_next_list(data, &page->lru);
	page = list_entry(next, struct page, lru);

	return page_address(page);
static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)

	unsigned long idx, idx_next;
	struct trace_entry *entry;

	idx = data->trace_head_idx;

	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

	entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		data->trace_head = trace_next_page(data, data->trace_head);

	if (data->trace_head == data->trace_tail &&
	    idx_next == data->trace_tail_idx) {
		data->trace_tail_idx++;
		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
			trace_next_page(data, data->trace_tail);
			data->trace_tail_idx = 0;

	data->trace_head_idx = idx_next;
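/*
 * Index arithmetic illustrated (the sizes are assumptions for the
 * example): if TRACE_ENTRY_SIZE were 64 with PAGE_SIZE of 4096,
 * ENTRIES_PER_PAGE would be 64, and entry slots on a page would sit at
 * data->trace_head + idx * 64 for idx in [0, 63].  When idx_next wraps,
 * trace_head advances to the next page in the round-robin list; if that
 * makes the head meet the tail, the tail is pushed forward one entry,
 * so a full buffer silently overwrites its oldest record instead of
 * blocking the traced context.
 */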
static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)

	struct task_struct *tsk = current;

	pc = preempt_count();

	entry->preempt_count = pc & 0xff;
	entry->pid = tsk->pid;
	entry->t = ftrace_now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
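/*
 * Worked example (illustrative): a function traced from a softirq with
 * interrupts disabled and a reschedule pending gets
 *
 *	entry->flags = TRACE_FLAG_IRQS_OFF | TRACE_FLAG_NEED_RESCHED |
 *		       TRACE_FLAG_SOFTIRQ;	// 0x01 | 0x02 | 0x08
 *
 * which the latency format renders as "dNs" in the per-entry flag
 * columns (see lat_print_generic() below).
 */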
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)

	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.parent_ip = parent_ip;
	spin_unlock_irqrestore(&data->lock, irq_flags);
trace_special(struct trace_array *tr, struct trace_array_cpu *data,
	      unsigned long arg1, unsigned long arg2, unsigned long arg3)

	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, 0);
	entry->type = TRACE_SPECIAL;
	entry->special.arg1 = arg1;
	entry->special.arg2 = arg2;
	entry->special.arg3 = arg3;
	spin_unlock_irqrestore(&data->lock, irq_flags);
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,

	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
	spin_unlock_irqrestore(&data->lock, irq_flags);
enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)

	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx ||
	    (data->trace_head == data->trace_tail &&
	     data->trace_head_idx == data->trace_tail_idx))

	if (!iter->next_page[cpu]) {
		/* Initialize the iterator for this cpu trace buffer */
		WARN_ON(!data->trace_tail);
		page = virt_to_page(data->trace_tail);
		iter->next_page[cpu] = &page->lru;
		iter->next_page_idx[cpu] = data->trace_tail_idx;

	page = list_entry(iter->next_page[cpu], struct page, lru);
	BUG_ON(&data->trace_pages == &page->lru);

	array = page_address(page);

	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
	return &array[iter->next_page_idx[cpu]];
static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)

	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))

		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ent->t < next->t)) {
static notrace void trace_iterator_increment(struct trace_iterator *iter)

	iter->next_idx[iter->cpu]++;
	iter->next_page_idx[iter->cpu]++;

	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
		struct trace_array_cpu *data = iter->tr->data[iter->cpu];

		iter->next_page_idx[iter->cpu] = 0;
		iter->next_page[iter->cpu] =
			trace_next_list(data, iter->next_page[iter->cpu]);
static notrace void trace_consume(struct trace_iterator *iter)

	struct trace_array_cpu *data = iter->tr->data[iter->cpu];

	data->trace_tail_idx++;
	if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
		data->trace_tail = trace_next_page(data, data->trace_tail);
		data->trace_tail_idx = 0;

	/* Check if we emptied it, then reset the index */
	if (data->trace_head == data->trace_tail &&
	    data->trace_head_idx == data->trace_tail_idx)
static notrace void *find_next_entry_inc(struct trace_iterator *iter)

	struct trace_entry *next;

	next = find_next_entry(iter, &next_cpu);

	iter->prev_ent = iter->ent;
	iter->prev_cpu = iter->cpu;

	iter->cpu = next_cpu;

	trace_iterator_increment(iter);

	return next ? iter : NULL;
static notrace void *s_next(struct seq_file *m, void *v, loff_t *pos)

	struct trace_iterator *iter = m->private;
	void *last_ent = iter->ent;

	/* can't go backwards */

	ent = find_next_entry_inc(iter);

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");
static void *s_start(struct seq_file *m, loff_t *pos)

	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {

		iter->prev_ent = NULL;

		for_each_possible_cpu(i) {
			iter->next_idx[i] = 0;
			iter->next_page[i] = NULL;

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))

		p = s_next(m, p, &l);
static void s_stop(struct seq_file *m, void *p)

	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)

#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	return trace_seq_printf(s, fmt, str);

seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)

#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	return trace_seq_printf(s, fmt, str);

# define IP_FMT "%08lx"

# define IP_FMT "%016lx"
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)

		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);

		ret = seq_print_sym_short(s, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
static notrace void print_lat_help_header(struct seq_file *m)

	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#             || / _---=> hardirq/softirq  \n");
	seq_puts(m, "#            ||| / _--=> preempt-depth     \n");
	seq_puts(m, "#            |||| /                        \n");
	seq_puts(m, "#            |||||     delay               \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");

static notrace void print_func_help_header(struct seq_file *m)

	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
print_trace_header(struct seq_file *m, struct trace_iterator *iter)

	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total = 0;
	unsigned long entries = 0;

	const char *name = "preemption";

	for_each_possible_cpu(cpu) {
		if (head_page(tr->data[cpu])) {
			total += tr->data[cpu]->trace_idx;
			if (tr->data[cpu]->trace_idx > tr->entries)
				entries += tr->entries;

				entries += tr->data[cpu]->trace_idx;

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),

#if defined(CONFIG_PREEMPT_NONE)

#elif defined(CONFIG_PREEMPT_VOLUNTARY)

#elif defined(CONFIG_PREEMPT_DESKTOP)

	/* These are reserved for later use */

	seq_printf(m, " #P:%d)\n", num_online_cpus());

	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)

	int hardirq, softirq;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%d", cpu);
	trace_seq_printf(s, "%c%c",
			 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
			 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		trace_seq_putc(s, 'H');

		trace_seq_putc(s, 'h');

		trace_seq_putc(s, 's');

		trace_seq_putc(s, '.');

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);

		trace_seq_puts(s, ".");
unsigned long preempt_mark_thresh = 100;

lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
		    unsigned long rel_usecs)

	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");

		trace_seq_puts(s, " : ");
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)

	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;

	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

		comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 entry->pid, cpu, entry->flags,
				 entry->preempt_count, trace_idx,
				 abs_usecs % 1000, rel_usecs/1000,

		lat_print_generic(s, entry, cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);

	switch (entry->type) {
		seq_print_ip_sym(s, entry->fn.ip, sym_flags);
		trace_seq_puts(s, " (");
		seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
		trace_seq_puts(s, ")\n");

		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		trace_seq_printf(s, " %d:%d:%c --> %d:%d %s\n",
				 entry->ctx.prev_pid,
				 entry->ctx.prev_prio,
				 entry->ctx.next_pid,
				 entry->ctx.next_prio,

		trace_seq_printf(s, " %lx %lx %lx\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);

		trace_seq_printf(s, "Unknown type %d\n", entry->type);
static notrace int print_trace_fmt(struct trace_iterator *iter)

	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	unsigned long usec_rem;
	unsigned long long t;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);

	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);

	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);

	switch (entry->type) {
		ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);

		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
		    entry->fn.parent_ip) {
			ret = trace_seq_printf(s, " <-");

			ret = seq_print_ip_sym(s, entry->fn.parent_ip,

		ret = trace_seq_printf(s, "\n");

		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		ret = trace_seq_printf(s, " %d:%d:%c ==> %d:%d\n",
				       entry->ctx.prev_pid,
				       entry->ctx.prev_prio,
				       entry->ctx.next_pid,
				       entry->ctx.next_prio);

		ret = trace_seq_printf(s, " %lx %lx %lx\n",
				       entry->special.arg1,
				       entry->special.arg2,
				       entry->special.arg3);
static notrace int print_raw_fmt(struct trace_iterator *iter)

	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;

	ret = trace_seq_printf(s, "%d %d %llu ",
			       entry->pid, iter->cpu, entry->t);

	switch (entry->type) {
		ret = trace_seq_printf(s, "%lx %lx\n",
				       entry->fn.ip, entry->fn.parent_ip);

		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		ret = trace_seq_printf(s, "%d %d %c %d %d\n",
				       entry->ctx.prev_pid,
				       entry->ctx.prev_prio,
				       entry->ctx.next_pid,
				       entry->ctx.next_prio);

		ret = trace_seq_printf(s, " %lx %lx %lx\n",
				       entry->special.arg1,
				       entry->special.arg2,
				       entry->special.arg3);
#define SEQ_PUT_FIELD_RET(s, x)				\
	if (!trace_seq_putmem(s, &(x), sizeof(x)))	\

static notrace int print_bin_fmt(struct trace_iterator *iter)

	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;

	SEQ_PUT_FIELD_RET(s, entry->pid);
	SEQ_PUT_FIELD_RET(s, entry->cpu);
	SEQ_PUT_FIELD_RET(s, entry->t);

	switch (entry->type) {
		SEQ_PUT_FIELD_RET(s, entry->fn.ip);
		SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip);

		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid);
		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio);
		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
		SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
		SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);

		SEQ_PUT_FIELD_RET(s, entry->special.arg1);
		SEQ_PUT_FIELD_RET(s, entry->special.arg2);
		SEQ_PUT_FIELD_RET(s, entry->special.arg3);
static int trace_empty(struct trace_iterator *iter)

	struct trace_array_cpu *data;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (head_page(data) && data->trace_idx &&
		    (data->trace_tail != data->trace_head ||
		     data->trace_tail_idx != data->trace_head_idx))

static int print_trace_line(struct trace_iterator *iter)

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		return print_lat_fmt(iter, iter->idx, iter->cpu);

	return print_trace_fmt(iter);
static int s_show(struct seq_file *m, void *v)

	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {

		seq_printf(m, "# tracer: %s\n", iter->trace->name);

		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))

			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);

			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);

		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
static struct seq_operations tracer_seq_ops = {
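/*
 * The member initializers are elided above; given the callbacks defined
 * in this file, the wiring is presumably:
 *
 *	.start	= s_start,
 *	.next	= s_next,
 *	.stop	= s_stop,
 *	.show	= s_show,
 *
 * i.e. the standard seq_file iteration contract: start/next walk the
 * merged per-CPU entries in timestamp order, show formats one entry
 * into iter->seq, and stop drops the locks taken in start.
 */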
static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)

	struct trace_iterator *iter;

	if (tracing_disabled) {

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)

	iter->tr = inode->i_private;
	iter->trace = current_trace;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);

		struct seq_file *m = file->private_data;

		/* stop the trace while dumping */

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);

	mutex_unlock(&trace_types_lock);
int tracing_open_generic(struct inode *inode, struct file *filp)

	if (tracing_disabled)

	filp->private_data = inode->i_private;

int tracing_release(struct inode *inode, struct file *file)

	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */

	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);

static int tracing_open(struct inode *inode, struct file *file)

	__tracing_open(inode, file, &ret);

static int tracing_lt_open(struct inode *inode, struct file *file)

	struct trace_iterator *iter;

	iter = __tracing_open(inode, file, &ret);

		iter->iter_flags |= TRACE_FILE_LAT_FMT;
static notrace void *
t_next(struct seq_file *m, void *v, loff_t *pos)

	struct tracer *t = m->private;

static void *t_start(struct seq_file *m, loff_t *pos)

	struct tracer *t = m->private;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)

	struct tracer *t = v;

	seq_printf(m, "%s", t->name);

static struct seq_operations show_traces_seq_ops = {

static int show_traces_open(struct inode *inode, struct file *file)

	if (tracing_disabled)

	ret = seq_open(file, &show_traces_seq_ops);

		struct seq_file *m = file->private_data;
		m->private = trace_types;
static struct file_operations tracing_fops = {
	.open		= tracing_open,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open		= tracing_lt_open,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.release	= seq_release,
};
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);

			r += sprintf(buf + r, "no%s ", trace_options[i]);

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)

	if (copy_from_user(&buf, ubuf, cnt))

	if (strncmp(buf, "no", 2) == 0) {

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
				trace_flags &= ~(1 << i);

				trace_flags |= (1 << i);

static struct file_operations tracing_iter_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_iter_ctrl_read,
	.write		= tracing_iter_ctrl_write,
};
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mount -t debugfs nodev /debug\n\n"
	"# cat /debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
	"# cat /debug/tracing/current_tracer\n"
	"# echo sched_switch > /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/iter_ctrl\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /debug/tracing/iter_ctrl\n"
	"# echo 1 > /debug/tracing/tracing_enabled\n"
	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /debug/tracing/tracing_enabled\n"

tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}
static struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
};

tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)

	struct trace_array *tr = filp->private_data;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)

	struct trace_array *tr = filp->private_data;

	if (copy_from_user(&buf, ubuf, cnt))

	val = simple_strtoul(buf, NULL, 10);

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);

	mutex_unlock(&trace_types_lock);
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)

	char buf[max_tracer_type_len+2];

	mutex_lock(&trace_types_lock);

		r = sprintf(buf, "%s\n", current_trace->name);

		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)

	struct trace_array *tr = &global_trace;

	char buf[max_tracer_type_len+1];

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)

	if (!t || t == current_trace)

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	mutex_unlock(&trace_types_lock);
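/*
 * Userspace sketch driving the files above (illustrative; assumes
 * debugfs is mounted at /debug, as in the mini-HOWTO earlier, and omits
 * error handling for brevity):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f;
 *		char line[256];
 *
 *		f = fopen("/debug/tracing/current_tracer", "w");
 *		fputs("sched_switch\n", f);
 *		fclose(f);
 *
 *		f = fopen("/debug/tracing/tracing_enabled", "w");
 *		fputs("1\n", f);
 *		fclose(f);
 *
 *		f = fopen("/debug/tracing/trace", "r");
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 *
 * Each fopen() can fail if the files are absent or permissions deny
 * access, so a real program should check the return values.
 */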
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)

	unsigned long *ptr = filp->private_data;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)

	long *ptr = filp->private_data;

	if (copy_from_user(&buf, ubuf, cnt))

	val = simple_strtoul(buf, NULL, 10);
static atomic_t tracing_reader;

static int tracing_open_pipe(struct inode *inode, struct file *filp)

	struct trace_iterator *iter;

	if (tracing_disabled)

	/* We only allow one reader of the pipe */
	if (atomic_inc_return(&tracing_reader) != 1) {
		atomic_dec(&tracing_reader);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);

	iter->tr = &global_trace;

	filp->private_data = iter;

static int tracing_release_pipe(struct inode *inode, struct file *file)

	struct trace_iterator *iter = file->private_data;

	atomic_dec(&tracing_reader);
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)

	struct trace_iterator *iter = filp->private_data;
	struct trace_array_cpu *data;
	static cpumask_t mask;

	unsigned long flags;

	/* return any leftover data */
	if (iter->seq.len > start) {
		len = iter->seq.len - start;

		ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);

	trace_seq_reset(&iter->seq);
	while (trace_empty(iter)) {
		/*
		 * This is a makeshift waitqueue. The reason we don't use
		 * an actual wait queue is because:
		 * 1) we only ever have one waiter
		 * 2) the tracer traces all functions, and we don't want
		 *    the overhead of calling wake_up and friends
		 *    (and tracing them too)
		 * Anyway, this is really a very primitive wakeup.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		iter->tr->waiter = current;

		/* sleep for one second, and try again. */
		schedule_timeout(HZ);

		iter->tr->waiter = NULL;

		if (signal_pending(current))

		/*
		 * We keep blocking until we have read something and
		 * tracing has been disabled. We still block if tracing is
		 * disabled but we have never read anything. This allows a
		 * user to cat this file, and then enable tracing. But
		 * after we have read something, we give an EOF when
		 * tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_enabled && iter->pos)
	/* stop when tracing is finished */
	if (trace_empty(iter))

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	memset(iter, 0, sizeof(*iter));
	iter->tr = &global_trace;

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill the buffer with everything
	 * we can read, and then release the locks again.
	 */
	local_irq_save(flags);
	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (!head_page(data) || !data->trace_idx)

		atomic_inc(&data->disabled);
		spin_lock(&data->lock);

	while (find_next_entry_inc(iter) != NULL) {
		int len = iter->seq.len;

		ret = print_trace_line(iter);

			/* don't print partial lines */
			iter->seq.len = len;

		trace_consume(iter);

		if (iter->seq.len >= cnt)

	for_each_cpu_mask(cpu, mask) {
		data = iter->tr->data[cpu];
		spin_unlock(&data->lock);
		atomic_dec(&data->disabled);

	local_irq_restore(flags);

	/* Now copy what we have to the user */
	read = iter->seq.len;

	ret = copy_to_user(ubuf, iter->seq.buffer, read);

	if (read < iter->seq.len)

	trace_seq_reset(&iter->seq);
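/*
 * Usage note for trace_pipe (illustrative): unlike "trace", reads here
 * consume entries and block while the buffer is empty, so a consumer
 * can simply stream it:
 *
 *	# echo ftrace > /debug/tracing/current_tracer
 *	# echo 1 > /debug/tracing/tracing_enabled
 *	# cat /debug/tracing/trace_pipe
 *
 * Only one reader is allowed at a time (see tracing_open_pipe() above),
 * and the reader polls roughly once per second rather than using a wait
 * queue, as explained in the makeshift-waitqueue comment.
 */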
static struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_ctrl_read,
	.write		= tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
};

static struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.read		= tracing_read_pipe,
	.release	= tracing_release_pipe,
};
#ifdef CONFIG_DYNAMIC_FTRACE

tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)

	unsigned long *p = filp->private_data;

	r = sprintf(buf, "%ld\n", *p);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

static struct file_operations tracing_read_long_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_long,
};
static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {

		pr_warning("Could not create debugfs directory 'tracing'\n");

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
static __init void tracer_init_debugfs(void)

	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");
	entry = debugfs_create_file("README", 0644, d_tracer,
				    NULL, &tracing_readme_fops);
		pr_warning("Could not create debugfs 'README' entry\n");

	entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
				    NULL, &tracing_pipe_fops);
		pr_warning("Could not create debugfs "
			   "'trace_pipe' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
/* dummy tracer to disable tracing */
static struct tracer no_tracer __read_mostly =
static int trace_alloc_page(void)

	struct trace_array_cpu *data;
	struct page *page, *tmp;

	/* first allocate a page for each CPU */
	for_each_possible_cpu(i) {
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");

		page = virt_to_page(array);
		list_add(&page->lru, &pages);

		/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");

		page = virt_to_page(array);
		list_add(&page->lru, &pages);

	/* Now that we successfully allocated a page per CPU, add them */
	for_each_possible_cpu(i) {
		data = global_trace.data[i];
		spin_lock_init(&data->lock);
		lockdep_set_class(&data->lock, &data->lock_key);
		page = list_entry(pages.next, struct page, lru);
		list_del_init(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		spin_lock_init(&data->lock);
		lockdep_set_class(&data->lock, &data->lock_key);
		page = list_entry(pages.next, struct page, lru);
		list_del_init(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);

	global_trace.entries += ENTRIES_PER_PAGE;

	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del_init(&page->lru);
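/*
 * Worked example (illustrative numbers): with PAGE_SIZE of 4096 and a
 * 40-byte struct trace_entry, ENTRIES_PER_PAGE would be 102, so reaching
 * the default trace_nr_entries of 16384 takes 161 pages (~660 KiB) per
 * CPU -- doubled when CONFIG_TRACER_MAX_TRACE also populates max_tr.
 * A failure part-way through a call lands in the freeing loop just
 * above, which releases the partially built page list.
 */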
__init static int tracer_alloc_buffers(void)

	struct trace_array_cpu *data;

	global_trace.ctrl = tracer_enabled;

	/* Allocate the first page for all buffers */
	for_each_possible_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);

		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");

		/* add the array's page to this CPU's list */
		INIT_LIST_HEAD(&data->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &data->trace_pages);
		/* use the LRU flag to differentiate the two buffers */

		/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");

		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &max_tr.data[i]->trace_pages);

	 * Since we allocate by orders of pages, we may be able to
	global_trace.entries = ENTRIES_PER_PAGE;

	while (global_trace.entries < trace_nr_entries) {
		if (trace_alloc_page())

	max_tr.entries = global_trace.entries;

	pr_info("tracer: %d pages allocated for %ld",
		pages, trace_nr_entries);
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info("   actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

	/* All seems OK, enable tracing */
	tracing_disabled = 0;
	for (i--; i >= 0; i--) {
		struct page *page, *tmp;
		struct trace_array_cpu *data = global_trace.data[i];

		list_for_each_entry_safe(page, tmp,
					 &data->trace_pages, lru) {
			list_del_init(&page->lru);

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];

		list_for_each_entry_safe(page, tmp,
					 &data->trace_pages, lru) {
			list_del_init(&page->lru);

fs_initcall(tracer_alloc_buffers);