 * ring buffer based function tracer
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 * Based on code from the latency_tracer, that is:
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/stacktrace.h>

unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;

static int tracing_disabled = 1;

ns2usecs(cycle_t nsec)

cycle_t ftrace_now(int cpu)
        return cpu_clock(cpu);

static struct trace_array global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int tracer_enabled = 1;
static unsigned long trace_nr_entries = 65536UL;

static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
static int max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;

void trace_wake_up(void)
         * The runqueue_is_locked() can fail, but this is the best we
        if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
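/*
 * Rough sizing (assumed values for illustration): with 4 KiB pages and a
 * struct trace_entry of a few dozen bytes, this works out to on the order
 * of a hundred entries per page; the exact count depends on PAGE_SIZE and
 * sizeof(struct trace_entry) on the build target.
 */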
static int __init set_nr_entries(char *str)
        trace_nr_entries = simple_strtoul(str, &str, 0);
__setup("trace_entries=", set_nr_entries);
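/*
 * Example (hypothetical value): booting with "trace_entries=131072" on the
 * kernel command line asks for twice the 65536-entry default above; the
 * buffers are then grown page by page until at least that many entries fit
 * (see tracer_alloc_buffers() at the bottom of this file).
 */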
unsigned long nsecs_to_usecs(unsigned long nsecs)

        __TRACE_FIRST_TYPE = 0,

enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF = 0x01,
        TRACE_FLAG_NEED_RESCHED = 0x02,
        TRACE_FLAG_HARDIRQ = 0x04,
        TRACE_FLAG_SOFTIRQ = 0x08,

#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
/* These must match the bit positions above */
static const char *trace_options[] = {

static raw_spinlock_t ftrace_max_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        struct trace_array_cpu *data = tr->data[cpu];

        max_tr.time_start = data->preempt_timestamp;

        data = max_tr.data[cpu];
        data->saved_latency = tracing_max_latency;

        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
        data->pid = tsk->pid;
        data->uid = tsk->uid;
        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        data->policy = tsk->policy;
        data->rt_priority = tsk->rt_priority;
        /* record this task's comm */
        tracing_record_cmdline(current);

void check_pages(struct trace_array_cpu *data)
        struct page *page, *tmp;

        BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
        BUG_ON(data->trace_pages.prev->next != &data->trace_pages);

        list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
                BUG_ON(page->lru.next->prev != &page->lru);
                BUG_ON(page->lru.prev->next != &page->lru);

void *head_page(struct trace_array_cpu *data)
        if (list_empty(&data->trace_pages))

        page = list_entry(data->trace_pages.next, struct page, lru);
        BUG_ON(&page->lru == &data->trace_pages);

        return page_address(page);

trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
        int len = (PAGE_SIZE - 1) - s->len;

        ret = vsnprintf(s->buffer + s->len, len, fmt, ap);

        /* If we can't write it all, don't bother writing anything */

trace_seq_puts(struct trace_seq *s, const char *str)
        int len = strlen(str);

        if (len > ((PAGE_SIZE - 1) - s->len))

        memcpy(s->buffer + s->len, str, len);

trace_seq_putc(struct trace_seq *s, unsigned char c)
        if (s->len >= (PAGE_SIZE - 1))

        s->buffer[s->len++] = c;

trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
        if (len > ((PAGE_SIZE - 1) - s->len))

        memcpy(s->buffer + s->len, mem, len);

trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
        unsigned char hex[HEX_CHARS];

        BUG_ON(len >= HEX_CHARS);
#ifdef __BIG_ENDIAN
        for (i = 0, j = 0; i < len; i++) {
#else
        for (i = len-1, j = 0; i >= 0; i--) {
#endif
                hex[j] = byte & 0x0f;

        return trace_seq_putmem(s, hex, j);
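/*
 * Typical trace_seq usage in this file (a sketch of the existing flow, not
 * a new API): build one line of output with the trace_seq_* helpers above,
 * hand it to a seq_file, and recycle the buffer, e.g.:
 *
 *      seq_print_ip_sym(&iter->seq, ip, sym_flags);
 *      trace_print_seq(m, &iter->seq);
 *
 * Each helper refuses to emit a partial record once fewer than
 * PAGE_SIZE - 1 bytes remain in the buffer.
 */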
trace_seq_reset(struct trace_seq *s)

trace_print_seq(struct seq_file *m, struct trace_seq *s)
        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

        seq_puts(m, s->buffer);

flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
        struct list_head flip_pages;

        INIT_LIST_HEAD(&flip_pages);
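        /*
         * The memcpy below copies tr2's bookkeeping (every field from
         * trace_head_idx to the end of the struct) into tr1; the three
         * list_splice_init() calls then exchange the two page lists via
         * the temporary flip_pages head.
         */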
        memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
                sizeof(struct trace_array_cpu) -
                offsetof(struct trace_array_cpu, trace_head_idx));

        list_splice_init(&tr1->trace_pages, &flip_pages);
        list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
        list_splice_init(&flip_pages, &tr2->trace_pages);
        BUG_ON(!list_empty(&flip_pages));

update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        struct trace_array_cpu *data;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);
        /* clear out all the previous traces */
        for_each_possible_cpu(i) {
                flip_trace(max_tr.data[i], data);

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);

 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
        struct trace_array_cpu *data = tr->data[cpu];

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);
        for_each_possible_cpu(i)
                tracing_reset(max_tr.data[i]);

        flip_trace(max_tr.data[cpu], data);

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);

int register_tracer(struct tracer *type)
                pr_info("Tracer must have a name\n");

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        pr_info("Trace %s already registered\n",

#ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array_cpu *data;
                struct trace_array *tr = &global_trace;
                int saved_ctrl = tr->ctrl;
                 * Run a selftest on this tracer.
                 * Here we reset the trace buffer, and set the current
                 * tracer to be this tracer. The tracer can then run some
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                for_each_possible_cpu(i) {
                        if (!head_page(data))
                current_trace = type;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
                tr->ctrl = saved_ctrl;
                        printk(KERN_CONT "FAILED!\n");
                /* Only reset on passing, to avoid touching corrupted buffers */
                for_each_possible_cpu(i) {
                        if (!head_page(data))
                printk(KERN_CONT "PASSED\n");

        type->next = trace_types;
        len = strlen(type->name);
        if (len > max_tracer_type_len)
                max_tracer_type_len = len;

        mutex_unlock(&trace_types_lock);

void unregister_tracer(struct tracer *type)
        mutex_lock(&trace_types_lock);
        for (t = &trace_types; *t; t = &(*t)->next) {
        pr_info("Trace %s not registered\n", type->name);

        if (strlen(type->name) != max_tracer_type_len)

        max_tracer_type_len = 0;
        for (t = &trace_types; *t; t = &(*t)->next) {
                len = strlen((*t)->name);
                if (len > max_tracer_type_len)
                        max_tracer_type_len = len;

        mutex_unlock(&trace_types_lock);

void tracing_reset(struct trace_array_cpu *data)
        data->trace_head = data->trace_tail = head_page(data);
        data->trace_head_idx = 0;
        data->trace_tail_idx = 0;

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;
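/*
 * The cmdline cache is a fixed ring of SAVED_CMDLINES task comms:
 * map_pid_to_cmdline[] translates a pid to its slot, and
 * map_cmdline_to_pid[] translates a slot back to the pid that owns it,
 * so a recycled slot can invalidate the stale pid mapping.
 */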
static void trace_init_cmdlines(void)
        memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))

         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
        if (!spin_trylock(&trace_cmdline_lock))

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx >= SAVED_CMDLINES) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                map = map_cmdline_to_pid[idx];
                if (map <= PID_MAX_DEFAULT)
                        map_pid_to_cmdline[map] = (unsigned)-1;

                map_pid_to_cmdline[tsk->pid] = idx;

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        spin_unlock(&trace_cmdline_lock);

static char *trace_find_cmdline(int pid)
        char *cmdline = "<...>";

        if (pid > PID_MAX_DEFAULT)

        map = map_pid_to_cmdline[pid];
        if (map >= SAVED_CMDLINES)

        cmdline = saved_cmdlines[map];

void tracing_record_cmdline(struct task_struct *tsk)
        if (atomic_read(&trace_record_cmdline_disabled))

        trace_save_cmdline(tsk);

static inline struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)
         * Round-robin - but skip the head (which is not a real page):
        if (unlikely(next == &data->trace_pages))

        BUG_ON(next == &data->trace_pages);

trace_next_page(struct trace_array_cpu *data, void *addr)
        struct list_head *next;

        page = virt_to_page(addr);

        next = trace_next_list(data, &page->lru);
        page = list_entry(next, struct page, lru);

        return page_address(page);

static inline struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
        unsigned long idx, idx_next;
        struct trace_entry *entry;

        idx = data->trace_head_idx;

        BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

        entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

        if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
                data->trace_head = trace_next_page(data, data->trace_head);

        if (data->trace_head == data->trace_tail &&
            idx_next == data->trace_tail_idx) {
                data->trace_tail_idx++;
                if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
                                trace_next_page(data, data->trace_tail);
                        data->trace_tail_idx = 0;

        data->trace_head_idx = idx_next;

tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
        struct task_struct *tsk = current;

        pc = preempt_count();

        entry->preempt_count = pc & 0xff;
        entry->pid = tsk->pid;
        entry->t = ftrace_now(raw_smp_processor_id());
        entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
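/*
 * In the latency output these flags become the one-letter columns drawn by
 * print_lat_help_header()/lat_print_generic() below: 'd' for irqs-off, 'N'
 * for need-resched, 'h'/'s' for hardirq/softirq context ('H' when both),
 * plus a hex digit for the preempt-depth.
 */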
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
               unsigned long ip, unsigned long parent_ip, unsigned long flags)
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type = TRACE_FN;
        entry->fn.parent_ip = parent_ip;
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
        if (likely(!atomic_read(&data->disabled)))
                trace_function(tr, data, ip, parent_ip, flags);

__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
        struct trace_array_cpu *data = __data;
        struct trace_array *tr = __tr;
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, 0);
        entry->type = TRACE_SPECIAL;
        entry->special.arg1 = arg1;
        entry->special.arg2 = arg2;
        entry->special.arg3 = arg3;
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

void __trace_stack(struct trace_array *tr,
                   struct trace_array_cpu *data,
        struct trace_entry *entry;
        struct stack_trace trace;

        if (!(trace_flags & TRACE_ITER_STACKTRACE))

        entry = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type = TRACE_STACK;

        memset(&entry->stack, 0, sizeof(entry->stack));

        trace.nr_entries = 0;
        trace.max_entries = FTRACE_STACK_ENTRIES;
        trace.entries = entry->stack.caller;

        save_stack_trace(&trace);

tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *prev,
                           struct task_struct *next,
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type = TRACE_CTX;
        entry->ctx.prev_pid = prev->pid;
        entry->ctx.prev_prio = prev->prio;
        entry->ctx.prev_state = prev->state;
        entry->ctx.next_pid = next->pid;
        entry->ctx.next_prio = next->prio;
        entry->ctx.next_state = next->state;
        __trace_stack(tr, data, flags, 4);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *wakee,
                           struct task_struct *curr,
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type = TRACE_WAKE;
        entry->ctx.prev_pid = curr->pid;
        entry->ctx.prev_prio = curr->prio;
        entry->ctx.prev_state = curr->state;
        entry->ctx.next_pid = wakee->pid;
        entry->ctx.next_prio = wakee->prio;
        entry->ctx.next_state = wakee->state;
        __trace_stack(tr, data, flags, 5);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

function_trace_call(unsigned long ip, unsigned long parent_ip)
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;

        if (unlikely(!tracer_enabled))

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

static struct ftrace_ops trace_ops __read_mostly =
        .func = function_trace_call,

void tracing_start_function_trace(void)
        register_ftrace_function(&trace_ops);

void tracing_stop_function_trace(void)
        unregister_ftrace_function(&trace_ops);
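/*
 * Starting/stopping function tracing just registers/unregisters trace_ops
 * with ftrace: once registered, every traced kernel function entry lands
 * in function_trace_call() above, which is why that callback bails out as
 * early and cheaply as possible when tracing is disabled.
 */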
enum trace_file_type {
        TRACE_FILE_LAT_FMT = 1,

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
                struct trace_iterator *iter, int cpu)
        struct trace_entry *array;

        if (iter->next_idx[cpu] >= tr->entries ||
            iter->next_idx[cpu] >= data->trace_idx ||
            (data->trace_head == data->trace_tail &&
             data->trace_head_idx == data->trace_tail_idx))

        if (!iter->next_page[cpu]) {
                /* Initialize the iterator for this cpu trace buffer */
                WARN_ON(!data->trace_tail);
                page = virt_to_page(data->trace_tail);
                iter->next_page[cpu] = &page->lru;
                iter->next_page_idx[cpu] = data->trace_tail_idx;

        page = list_entry(iter->next_page[cpu], struct page, lru);
        BUG_ON(&data->trace_pages == &page->lru);

        array = page_address(page);

        WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
        return &array[iter->next_page_idx[cpu]];

static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
        struct trace_array *tr = iter->tr;
        struct trace_entry *ent, *next = NULL;
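        /*
         * N-way merge across the per-cpu buffers: peek at the next entry
         * on every cpu and hand back the one with the oldest timestamp.
         */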
        for_each_possible_cpu(cpu) {
                if (!head_page(tr->data[cpu]))
                ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
                 * Pick the entry with the smallest timestamp:
                if (ent && (!next || ent->t < next->t)) {

static void trace_iterator_increment(struct trace_iterator *iter)
        iter->next_idx[iter->cpu]++;
        iter->next_page_idx[iter->cpu]++;

        if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
                struct trace_array_cpu *data = iter->tr->data[iter->cpu];

                iter->next_page_idx[iter->cpu] = 0;
                iter->next_page[iter->cpu] =
                        trace_next_list(data, iter->next_page[iter->cpu]);

static void trace_consume(struct trace_iterator *iter)
        struct trace_array_cpu *data = iter->tr->data[iter->cpu];

        data->trace_tail_idx++;
        if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
                data->trace_tail = trace_next_page(data, data->trace_tail);
                data->trace_tail_idx = 0;
        /* If the buffer is now empty, reset the indices */
        if (data->trace_head == data->trace_tail &&
            data->trace_head_idx == data->trace_tail_idx)

static void *find_next_entry_inc(struct trace_iterator *iter)
        struct trace_entry *next;

        next = find_next_entry(iter, &next_cpu);

        iter->prev_ent = iter->ent;
        iter->prev_cpu = iter->cpu;

        iter->cpu = next_cpu;

        trace_iterator_increment(iter);

        return next ? iter : NULL;

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
        struct trace_iterator *iter = m->private;
        void *last_ent = iter->ent;

        /* can't go backwards */

        ent = find_next_entry_inc(iter);

        while (ent && iter->idx < i)
                ent = find_next_entry_inc(iter);
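        /*
         * When we run off the end of the buffer, append a "vim:ft=help"
         * modeline so that a trace saved to a file picks up vim's
         * help-file highlighting.
         */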
        if (last_ent && !ent)
                seq_puts(m, "\n\nvim:ft=help\n");
static void *s_start(struct seq_file *m, loff_t *pos)
        struct trace_iterator *iter = m->private;

        mutex_lock(&trace_types_lock);

        if (!current_trace || current_trace != iter->trace) {
                mutex_unlock(&trace_types_lock);

        atomic_inc(&trace_record_cmdline_disabled);

        /* let the tracer grab locks here if needed */
        if (current_trace->start)
                current_trace->start(iter);

        if (*pos != iter->pos) {
                iter->prev_ent = NULL;

                for_each_possible_cpu(i) {
                        iter->next_idx[i] = 0;
                        iter->next_page[i] = NULL;

                for (p = iter; p && l < *pos; p = s_next(m, p, &l))

                p = s_next(m, p, &l);

static void s_stop(struct seq_file *m, void *p)
        struct trace_iterator *iter = m->private;

        atomic_dec(&trace_record_cmdline_disabled);

        /* let the tracer release locks here if needed */
        if (current_trace && current_trace == iter->trace && iter->trace->stop)
                iter->trace->stop(iter);

        mutex_unlock(&trace_types_lock);

seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        return trace_seq_printf(s, fmt, str);

seq_print_sym_offset(struct trace_seq *s, const char *fmt,
                     unsigned long address)
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, address);
        return trace_seq_printf(s, fmt, str);

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
# define IP_FMT "%016lx"

seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
                return trace_seq_printf(s, "0");

        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                ret = seq_print_sym_offset(s, "%s", ip);
                ret = seq_print_sym_short(s, "%s", ip);

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
static void print_lat_help_header(struct seq_file *m)
        seq_puts(m, "#                _------=> CPU#            \n");
        seq_puts(m, "#               / _-----=> irqs-off        \n");
        seq_puts(m, "#              | / _----=> need-resched    \n");
        seq_puts(m, "#              || / _---=> hardirq/softirq \n");
        seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
        seq_puts(m, "#              |||| /                      \n");
        seq_puts(m, "#              |||||     delay             \n");
        seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
        seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");

static void print_func_help_header(struct seq_file *m)
        seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |      |          |         |\n");
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[tr->cpu];
        struct tracer *type = current_trace;
        unsigned long total = 0;
        unsigned long entries = 0;

        const char *name = "preemption";

        for_each_possible_cpu(cpu) {
                if (head_page(tr->data[cpu])) {
                        total += tr->data[cpu]->trace_idx;
                        if (tr->data[cpu]->trace_idx > tr->entries)
                                entries += tr->entries;
                                entries += tr->data[cpu]->trace_idx;

        seq_printf(m, "%s latency trace v1.1.5 on %s\n",
        seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
        seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
#if defined(CONFIG_PREEMPT_NONE)
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
#elif defined(CONFIG_PREEMPT_DESKTOP)

        /* These are reserved for later use */

        seq_printf(m, " #P:%d)\n", num_online_cpus());

        seq_puts(m, "    -----------------\n");
        seq_printf(m, "    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "    -----------------\n");

        if (data->critical_start) {
                seq_puts(m, " => started at: ");
                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                trace_print_seq(m, &iter->seq);

lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
        int hardirq, softirq;

        comm = trace_find_cmdline(entry->pid);

        trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
        trace_seq_printf(s, "%d", cpu);
        trace_seq_printf(s, "%c%c",
                        (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
                        ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
        if (hardirq && softirq)
                trace_seq_putc(s, 'H');
                        trace_seq_putc(s, 'h');
                                trace_seq_putc(s, 's');
                                trace_seq_putc(s, '.');

        if (entry->preempt_count)
                trace_seq_printf(s, "%x", entry->preempt_count);
                trace_seq_puts(s, ".");

unsigned long preempt_mark_thresh = 100;

lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
                    unsigned long rel_usecs)
        trace_seq_printf(s, " %4lldus", abs_usecs);
        if (rel_usecs > preempt_mark_thresh)
                trace_seq_puts(s, "!: ");
        else if (rel_usecs > 1)
                trace_seq_puts(s, "+: ");
                trace_seq_puts(s, " : ");

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
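/*
 * One letter per task state (e.g. 'R' for running, 'S' for sleeping, as
 * defined by TASK_STATE_TO_CHAR_STR); states past the end of the string
 * are printed as 'X' by the formatters below.
 */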
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *next_entry = find_next_entry(iter, NULL);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        struct trace_entry *entry = iter->ent;
        unsigned long abs_usecs;
        unsigned long rel_usecs;

        rel_usecs = ns2usecs(next_entry->t - entry->t);
        abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

                comm = trace_find_cmdline(entry->pid);
                trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
                                 " %ld.%03ldms (+%ld.%03ldms): ",
                                 entry->pid, cpu, entry->flags,
                                 entry->preempt_count, trace_idx,
                                 abs_usecs % 1000, rel_usecs/1000,

                lat_print_generic(s, entry, cpu);
                lat_print_timestamp(s, abs_usecs, rel_usecs);

        switch (entry->type) {
                seq_print_ip_sym(s, entry->fn.ip, sym_flags);
                trace_seq_puts(s, " (");
                seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
                trace_seq_puts(s, ")\n");

                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                T = entry->ctx.next_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.next_state] : 'X';

                comm = trace_find_cmdline(entry->ctx.next_pid);
                trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
                                 entry->ctx.prev_pid,
                                 entry->ctx.prev_prio,
                                 S, entry->type == TRACE_CTX ? "==>" : "  +",
                                 entry->ctx.next_pid,
                                 entry->ctx.next_prio,
                trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                                trace_seq_puts(s, " <= ");
                        seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
                trace_seq_puts(s, "\n");
                trace_seq_printf(s, "Unknown type %d\n", entry->type);

static int print_trace_fmt(struct trace_iterator *iter)
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *entry;
        unsigned long usec_rem;
        unsigned long long t;

        comm = trace_find_cmdline(iter->ent->pid);

        t = ns2usecs(entry->t);
        usec_rem = do_div(t, 1000000ULL);
        secs = (unsigned long)t;

        ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
        ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
        ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);

        switch (entry->type) {
                ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
                if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
                    entry->fn.parent_ip) {
                        ret = trace_seq_printf(s, " <-");
                        ret = seq_print_ip_sym(s, entry->fn.parent_ip,
                ret = trace_seq_printf(s, "\n");

                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                T = entry->ctx.next_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.next_state] : 'X';
                ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
                                       entry->ctx.prev_pid,
                                       entry->ctx.prev_prio,
                                       entry->type == TRACE_CTX ? "==>" : "  +",
                                       entry->ctx.next_pid,
                                       entry->ctx.next_prio,
                ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                       entry->special.arg1,
                                       entry->special.arg2,
                                       entry->special.arg3);
                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                                ret = trace_seq_puts(s, " <= ");
                        ret = seq_print_ip_sym(s, entry->stack.caller[i],
                ret = trace_seq_puts(s, "\n");

static int print_raw_fmt(struct trace_iterator *iter)
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;

        ret = trace_seq_printf(s, "%d %d %llu ",
                               entry->pid, iter->cpu, entry->t);

        switch (entry->type) {
                ret = trace_seq_printf(s, "%lx %lx\n",
                                       entry->fn.ip, entry->fn.parent_ip);
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                T = entry->ctx.next_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.next_state] : 'X';
                if (entry->type == TRACE_WAKE)
                ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
                                       entry->ctx.prev_pid,
                                       entry->ctx.prev_prio,
                                       entry->ctx.next_pid,
                                       entry->ctx.next_prio,
                ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                       entry->special.arg1,
                                       entry->special.arg2,
                                       entry->special.arg3);

#define SEQ_PUT_FIELD_RET(s, x) \
        if (!trace_seq_putmem(s, &(x), sizeof(x))) \

#define SEQ_PUT_HEX_FIELD_RET(s, x) \
        if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
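/*
 * Both macros emit one field into the trace_seq and, if the write does not
 * fit, bail out of the *calling* printer (the elided remainder of each
 * macro presumably returns 0 there, signalling "buffer full, retry later"),
 * hence the _RET suffix.
 */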
static int print_hex_fmt(struct trace_iterator *iter)
        struct trace_seq *s = &iter->seq;
        unsigned char newline = '\n';
        struct trace_entry *entry;

        SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
        SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
        SEQ_PUT_HEX_FIELD_RET(s, entry->t);

        switch (entry->type) {
                SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip);
                SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);

                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                T = entry->ctx.next_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.next_state] : 'X';
                if (entry->type == TRACE_WAKE)
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio);
                SEQ_PUT_HEX_FIELD_RET(s, S);
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
                SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
                SEQ_PUT_HEX_FIELD_RET(s, T);

                SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
                SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
                SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);

        SEQ_PUT_FIELD_RET(s, newline);

static int print_bin_fmt(struct trace_iterator *iter)
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;

        SEQ_PUT_FIELD_RET(s, entry->pid);
        SEQ_PUT_FIELD_RET(s, entry->cpu);
        SEQ_PUT_FIELD_RET(s, entry->t);

        switch (entry->type) {
                SEQ_PUT_FIELD_RET(s, entry->fn.ip);
                SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip);

                SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid);
                SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio);
                SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
                SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
                SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
                SEQ_PUT_FIELD_RET(s, entry->ctx.next_state);

                SEQ_PUT_FIELD_RET(s, entry->special.arg1);
                SEQ_PUT_FIELD_RET(s, entry->special.arg2);
                SEQ_PUT_FIELD_RET(s, entry->special.arg3);

static int trace_empty(struct trace_iterator *iter)
        struct trace_array_cpu *data;

        for_each_possible_cpu(cpu) {
                data = iter->tr->data[cpu];

                if (head_page(data) && data->trace_idx &&
                    (data->trace_tail != data->trace_head ||
                     data->trace_tail_idx != data->trace_head_idx))

static int print_trace_line(struct trace_iterator *iter)
        if (trace_flags & TRACE_ITER_BIN)
                return print_bin_fmt(iter);

        if (trace_flags & TRACE_ITER_HEX)
                return print_hex_fmt(iter);

        if (trace_flags & TRACE_ITER_RAW)
                return print_raw_fmt(iter);

        if (iter->iter_flags & TRACE_FILE_LAT_FMT)
                return print_lat_fmt(iter, iter->idx, iter->cpu);

        return print_trace_fmt(iter);

static int s_show(struct seq_file *m, void *v)
        struct trace_iterator *iter = v;

        if (iter->ent == NULL) {
                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
                if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
                        /* print nothing if the buffers are empty */
                        if (trace_empty(iter))
                        print_trace_header(m, iter);
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_lat_help_header(m);
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_func_help_header(m);
                print_trace_line(iter);
                trace_print_seq(m, &iter->seq);

static struct seq_operations tracer_seq_ops = {

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, int *ret)
        struct trace_iterator *iter;

        if (tracing_disabled) {

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);

        mutex_lock(&trace_types_lock);
        if (current_trace && current_trace->print_max)

        iter->tr = inode->i_private;
        iter->trace = current_trace;

        /* TODO stop tracer */
        *ret = seq_open(file, &tracer_seq_ops);
                struct seq_file *m = file->private_data;

                /* stop the trace while dumping */

                if (iter->trace && iter->trace->open)
                        iter->trace->open(iter);

        mutex_unlock(&trace_types_lock);

int tracing_open_generic(struct inode *inode, struct file *filp)
        if (tracing_disabled)

        filp->private_data = inode->i_private;

int tracing_release(struct inode *inode, struct file *file)
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct trace_iterator *iter = m->private;

        mutex_lock(&trace_types_lock);
        if (iter->trace && iter->trace->close)
                iter->trace->close(iter);

        /* reenable tracing if it was previously enabled */
        mutex_unlock(&trace_types_lock);

        seq_release(inode, file);

static int tracing_open(struct inode *inode, struct file *file)
        __tracing_open(inode, file, &ret);

static int tracing_lt_open(struct inode *inode, struct file *file)
        struct trace_iterator *iter;

        iter = __tracing_open(inode, file, &ret);
                iter->iter_flags |= TRACE_FILE_LAT_FMT;

t_next(struct seq_file *m, void *v, loff_t *pos)
        struct tracer *t = m->private;

static void *t_start(struct seq_file *m, loff_t *pos)
        struct tracer *t = m->private;

        mutex_lock(&trace_types_lock);
        for (; t && l < *pos; t = t_next(m, t, &l))

static void t_stop(struct seq_file *m, void *p)
        mutex_unlock(&trace_types_lock);

static int t_show(struct seq_file *m, void *v)
        struct tracer *t = v;

        seq_printf(m, "%s", t->name);

static struct seq_operations show_traces_seq_ops = {

static int show_traces_open(struct inode *inode, struct file *file)
        if (tracing_disabled)

        ret = seq_open(file, &show_traces_seq_ops);
                struct seq_file *m = file->private_data;
                m->private = trace_types;

static struct file_operations tracing_fops = {
        .open = tracing_open,
        .llseek = seq_lseek,
        .release = tracing_release,

static struct file_operations tracing_lt_fops = {
        .open = tracing_lt_open,
        .llseek = seq_lseek,
        .release = tracing_release,

static struct file_operations show_traces_fops = {
        .open = show_traces_open,
        .release = seq_release,

 * Only trace on a CPU if the bitmask is set:
static cpumask_t tracing_cpumask = CPU_MASK_ALL;
 * When the tracing/tracing_cpumask file is modified, this holds
 * the new bitmask we are about to install:
static cpumask_t tracing_cpumask_new;
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
static DEFINE_MUTEX(tracing_cpumask_update_lock);

 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
static char mask_str[NR_CPUS + 1];

tracing_cpumask_read(struct file *filp, char __user *ubuf,
                     size_t count, loff_t *ppos)
        mutex_lock(&tracing_cpumask_update_lock);

        len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
        if (count - len < 2) {

        len += sprintf(mask_str + len, "\n");
        count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

        mutex_unlock(&tracing_cpumask_update_lock);

tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                      size_t count, loff_t *ppos)
        mutex_lock(&tracing_cpumask_update_lock);
        err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);

        raw_local_irq_disable();
        __raw_spin_lock(&ftrace_max_lock);
        for_each_possible_cpu(cpu) {
                 * Increase/decrease the disabled counter if we are
                 * about to flip a bit in the cpumask:
                if (cpu_isset(cpu, tracing_cpumask) &&
                    !cpu_isset(cpu, tracing_cpumask_new)) {
                        atomic_inc(&global_trace.data[cpu]->disabled);
                if (!cpu_isset(cpu, tracing_cpumask) &&
                    cpu_isset(cpu, tracing_cpumask_new)) {
                        atomic_dec(&global_trace.data[cpu]->disabled);
        __raw_spin_unlock(&ftrace_max_lock);
        raw_local_irq_enable();

        tracing_cpumask = tracing_cpumask_new;

        mutex_unlock(&tracing_cpumask_update_lock);

        mutex_unlock(&tracing_cpumask_update_lock);

static struct file_operations tracing_cpumask_fops = {
        .open = tracing_open_generic,
        .read = tracing_cpumask_read,
        .write = tracing_cpumask_write,
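/*
 * Example usage (paths as in the mini-HOWTO below, mask value hypothetical):
 *
 *      # echo 3 > /debug/tracing/tracing_cpumask   (trace CPUs 0 and 1 only)
 *      # cat /debug/tracing/tracing_cpumask
 *
 * Writing the mask bumps/drops the per-cpu disabled counters above, so
 * tracing stops on cleared CPUs without tearing down their buffers.
 */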
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
        /* calculate max size */
        for (i = 0; trace_options[i]; i++) {
                len += strlen(trace_options[i]);
                len += 3; /* "no" and space */

        /* +2 for \n and \0 */
        buf = kmalloc(len + 2, GFP_KERNEL);

        for (i = 0; trace_options[i]; i++) {
                if (trace_flags & (1 << i))
                        r += sprintf(buf + r, "%s ", trace_options[i]);
                        r += sprintf(buf + r, "no%s ", trace_options[i]);

        r += sprintf(buf + r, "\n");
        WARN_ON(r >= len + 2);

        r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
        if (copy_from_user(&buf, ubuf, cnt))

        if (strncmp(buf, "no", 2) == 0) {

        for (i = 0; trace_options[i]; i++) {
                int len = strlen(trace_options[i]);

                if (strncmp(cmp, trace_options[i], len) == 0) {
                                trace_flags &= ~(1 << i);
                                trace_flags |= (1 << i);

         * If no option could be set, return an error:
        if (!trace_options[i])

static struct file_operations tracing_iter_fops = {
        .open = tracing_open_generic,
        .read = tracing_iter_ctrl_read,
        .write = tracing_iter_ctrl_write,

static const char readme_msg[] =
        "tracing mini-HOWTO:\n\n"
        "# mount -t debugfs nodev /debug\n\n"
        "# cat /debug/tracing/available_tracers\n"
        "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
        "# cat /debug/tracing/current_tracer\n"
        "# echo sched_switch > /debug/tracing/current_tracer\n"
        "# cat /debug/tracing/current_tracer\n"
        "# cat /debug/tracing/iter_ctrl\n"
        "noprint-parent nosym-offset nosym-addr noverbose\n"
        "# echo print-parent > /debug/tracing/iter_ctrl\n"
        "# echo 1 > /debug/tracing/tracing_enabled\n"
        "# cat /debug/tracing/trace > /tmp/trace.txt\n"
1978 "echo 0 > /debug/tracing/tracing_enabled\n"
tracing_readme_read(struct file *filp, char __user *ubuf,
                    size_t cnt, loff_t *ppos)
        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       readme_msg, strlen(readme_msg));

static struct file_operations tracing_readme_fops = {
        .open = tracing_open_generic,
        .read = tracing_readme_read,

tracing_ctrl_read(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
        struct trace_array *tr = filp->private_data;

        r = sprintf(buf, "%ld\n", tr->ctrl);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

tracing_ctrl_write(struct file *filp, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
        struct trace_array *tr = filp->private_data;

        if (copy_from_user(&buf, ubuf, cnt))

        val = simple_strtoul(buf, NULL, 10);

        mutex_lock(&trace_types_lock);
        if (tr->ctrl ^ val) {
                if (current_trace && current_trace->ctrl_update)
                        current_trace->ctrl_update(tr);

        mutex_unlock(&trace_types_lock);

tracing_set_trace_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
        char buf[max_tracer_type_len+2];

        mutex_lock(&trace_types_lock);
                r = sprintf(buf, "%s\n", current_trace->name);
                r = sprintf(buf, "\n");
        mutex_unlock(&trace_types_lock);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

tracing_set_trace_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
        struct trace_array *tr = &global_trace;

        char buf[max_tracer_type_len+1];

        if (cnt > max_tracer_type_len)
                cnt = max_tracer_type_len;

        if (copy_from_user(&buf, ubuf, cnt))

        /* strip ending whitespace. */
        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(t->name, buf) == 0)

        if (!t || t == current_trace)

        if (current_trace && current_trace->reset)
                current_trace->reset(tr);

        mutex_unlock(&trace_types_lock);

tracing_max_lat_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
        unsigned long *ptr = filp->private_data;

        r = snprintf(buf, 64, "%ld\n",
                     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

tracing_max_lat_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
        long *ptr = filp->private_data;

        if (copy_from_user(&buf, ubuf, cnt))

        val = simple_strtoul(buf, NULL, 10);

static atomic_t tracing_reader;

static int tracing_open_pipe(struct inode *inode, struct file *filp)
        struct trace_iterator *iter;

        if (tracing_disabled)
        /* We only allow one reader of the pipe */
        if (atomic_inc_return(&tracing_reader) != 1) {
                atomic_dec(&tracing_reader);

        /* create a buffer to store the information to pass to userspace */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);

        iter->tr = &global_trace;

        filp->private_data = iter;

static int tracing_release_pipe(struct inode *inode, struct file *file)
        struct trace_iterator *iter = file->private_data;

        atomic_dec(&tracing_reader);

tracing_poll_pipe(struct file *filp, poll_table *poll_table)
        struct trace_iterator *iter = filp->private_data;

        if (trace_flags & TRACE_ITER_BLOCK) {
                 * Always select as readable when in blocking mode
                return POLLIN | POLLRDNORM;

        if (!trace_empty(iter))
                return POLLIN | POLLRDNORM;
        poll_wait(filp, &trace_wait, poll_table);
        if (!trace_empty(iter))
                return POLLIN | POLLRDNORM;

tracing_read_pipe(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
        struct trace_iterator *iter = filp->private_data;
        struct trace_array_cpu *data;
        static cpumask_t mask;
        unsigned long flags;
#ifdef CONFIG_FTRACE

        /* return any leftover data */
        if (iter->seq.len > start) {
                len = iter->seq.len - start;
                ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);

        trace_seq_reset(&iter->seq);

        while (trace_empty(iter)) {
                if (!(trace_flags & TRACE_ITER_BLOCK))
                        return -EWOULDBLOCK;
                 * This is a make-shift waitqueue. The reason we don't use
                 * an actual wait queue is because:
                 * 1) we only ever have one waiter
                 * 2) the tracer traces all functions, and we don't want
                 *    the overhead of calling wake_up and friends
                 *    (and tracing them too)
                 * Anyway, this is a really primitive wakeup.
                set_current_state(TASK_INTERRUPTIBLE);
                iter->tr->waiter = current;

                /* sleep for one second, and try again. */
                schedule_timeout(HZ);

                iter->tr->waiter = NULL;

                if (signal_pending(current))

                 * We block until we read something and tracing is disabled.
                 * We still block if tracing is disabled, but we have never
                 * read anything. This allows a user to cat this file, and
                 * then enable tracing. But after we have read something,
                 * we give an EOF when tracing is again disabled.
                 *
                 * iter->pos will be 0 if we haven't read anything.
                if (!tracer_enabled && iter->pos)

        /* stop when tracing is finished */
        if (trace_empty(iter))

        if (cnt >= PAGE_SIZE)
                cnt = PAGE_SIZE - 1;

        memset(iter, 0, sizeof(*iter));
        iter->tr = &global_trace;
         * We need to stop all tracing on all CPUS to read
         * the next buffer. This is a bit expensive, but is
         * not done often. We fill in all that we can read,
         * and then release the locks again.
        local_irq_save(flags);
#ifdef CONFIG_FTRACE
        ftrace_save = ftrace_enabled;

        for_each_possible_cpu(cpu) {
                data = iter->tr->data[cpu];

                if (!head_page(data) || !data->trace_idx)

                atomic_inc(&data->disabled);

        for_each_cpu_mask(cpu, mask) {
                data = iter->tr->data[cpu];
                __raw_spin_lock(&data->lock);

        while (find_next_entry_inc(iter) != NULL) {
                int len = iter->seq.len;

                ret = print_trace_line(iter);
                        /* don't print partial lines */
                        iter->seq.len = len;

                trace_consume(iter);

                if (iter->seq.len >= cnt)

        for_each_cpu_mask(cpu, mask) {
                data = iter->tr->data[cpu];
                __raw_spin_unlock(&data->lock);

        for_each_cpu_mask(cpu, mask) {
                data = iter->tr->data[cpu];
                atomic_dec(&data->disabled);
#ifdef CONFIG_FTRACE
        ftrace_enabled = ftrace_save;
        local_irq_restore(flags);

        /* Now copy what we have to the user */
        read = iter->seq.len;

        ret = copy_to_user(ubuf, iter->seq.buffer, read);

        if (read < iter->seq.len)

        trace_seq_reset(&iter->seq);

static struct file_operations tracing_max_lat_fops = {
        .open = tracing_open_generic,
        .read = tracing_max_lat_read,
        .write = tracing_max_lat_write,

static struct file_operations tracing_ctrl_fops = {
        .open = tracing_open_generic,
        .read = tracing_ctrl_read,
        .write = tracing_ctrl_write,

static struct file_operations set_tracer_fops = {
        .open = tracing_open_generic,
        .read = tracing_set_trace_read,
        .write = tracing_set_trace_write,

static struct file_operations tracing_pipe_fops = {
        .open = tracing_open_pipe,
        .poll = tracing_poll_pipe,
        .read = tracing_read_pipe,
        .release = tracing_release_pipe,

#ifdef CONFIG_DYNAMIC_FTRACE

tracing_read_long(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
        unsigned long *p = filp->private_data;

        r = sprintf(buf, "%ld\n", *p);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

static struct file_operations tracing_read_long_fops = {
        .open = tracing_open_generic,
        .read = tracing_read_long,

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
        d_tracer = debugfs_create_dir("tracing", NULL);

        if (!d_tracer && !once) {
                pr_warning("Could not create debugfs directory 'tracing'\n");

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"

static __init void tracer_init_debugfs(void)
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
                                    &global_trace, &tracing_ctrl_fops);
                pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

        entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
                                    NULL, &tracing_iter_fops);
                pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

        entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
                                    NULL, &tracing_cpumask_fops);
                pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");

        entry = debugfs_create_file("latency_trace", 0444, d_tracer,
                                    &global_trace, &tracing_lt_fops);
                pr_warning("Could not create debugfs 'latency_trace' entry\n");

        entry = debugfs_create_file("trace", 0444, d_tracer,
                                    &global_trace, &tracing_fops);
                pr_warning("Could not create debugfs 'trace' entry\n");

        entry = debugfs_create_file("available_tracers", 0444, d_tracer,
                                    &global_trace, &show_traces_fops);
2472 pr_warning("Could not create debugfs 'trace' entry\n");
2474 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2475 &global_trace, &set_tracer_fops);
                pr_warning("Could not create debugfs 'current_tracer' entry\n");
2479 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2480 &tracing_max_latency,
2481 &tracing_max_lat_fops);
2483 pr_warning("Could not create debugfs "
2484 "'tracing_max_latency' entry\n");
2486 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
2487 &tracing_thresh, &tracing_max_lat_fops);
2489 pr_warning("Could not create debugfs "
2490 "'tracing_threash' entry\n");
2491 entry = debugfs_create_file("README", 0644, d_tracer,
2492 NULL, &tracing_readme_fops);
2494 pr_warning("Could not create debugfs 'README' entry\n");
2496 entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
2497 NULL, &tracing_pipe_fops);
                pr_warning("Could not create debugfs "
                           "'trace_pipe' entry\n");
#ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
                                    &tracing_read_long_fops);
                pr_warning("Could not create debugfs "
                           "'dyn_ftrace_total_info' entry\n");

/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =

static int trace_alloc_page(void)
        struct trace_array_cpu *data;
        struct page *page, *tmp;

        /* first allocate a page for each CPU */
        for_each_possible_cpu(i) {
                array = (void *)__get_free_page(GFP_KERNEL);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate page "
                               "for trace buffer!\n");
                page = virt_to_page(array);
                list_add(&page->lru, &pages);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
                array = (void *)__get_free_page(GFP_KERNEL);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate page "
                               "for trace buffer!\n");
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
        /* Now that we successfully allocated a page per CPU, add them */
        for_each_possible_cpu(i) {
                data = global_trace.data[i];
                data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                page = list_entry(pages.next, struct page, lru);
                list_del_init(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);

#ifdef CONFIG_TRACER_MAX_TRACE
                data = max_tr.data[i];
                data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                page = list_entry(pages.next, struct page, lru);
                list_del_init(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);

        global_trace.entries += ENTRIES_PER_PAGE;

        list_for_each_entry_safe(page, tmp, &pages, lru) {
                list_del_init(&page->lru);

__init static int tracer_alloc_buffers(void)
        struct trace_array_cpu *data;

        global_trace.ctrl = tracer_enabled;

        /* Allocate the first page for all buffers */
        for_each_possible_cpu(i) {
                data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
                max_tr.data[i] = &per_cpu(max_data, i);

                array = (void *)__get_free_page(GFP_KERNEL);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate page "
                               "for trace buffer!\n");
                /* set the array to the list */
                INIT_LIST_HEAD(&data->trace_pages);
                page = virt_to_page(array);
                list_add(&page->lru, &data->trace_pages);
                /* use the LRU flag to differentiate the two buffers */

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
                array = (void *)__get_free_page(GFP_KERNEL);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate page "
                               "for trace buffer!\n");
                INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
                page = virt_to_page(array);
                list_add(&page->lru, &max_tr.data[i]->trace_pages);

         * Since we allocate by orders of pages, we may be able to
        global_trace.entries = ENTRIES_PER_PAGE;

        while (global_trace.entries < trace_nr_entries) {
                if (trace_alloc_page())

        max_tr.entries = global_trace.entries;

        pr_info("tracer: %d pages allocated for %ld",
                pages, trace_nr_entries);
        pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
        pr_info("   actual entries %ld\n", global_trace.entries);

        tracer_init_debugfs();

        trace_init_cmdlines();

        register_tracer(&no_tracer);
        current_trace = &no_tracer;

        /* All seems OK, enable tracing */
        tracing_disabled = 0;

        for (i--; i >= 0; i--) {
                struct page *page, *tmp;
                struct trace_array_cpu *data = global_trace.data[i];

                list_for_each_entry_safe(page, tmp,
                                         &data->trace_pages, lru) {
                        list_del_init(&page->lru);

#ifdef CONFIG_TRACER_MAX_TRACE
                data = max_tr.data[i];
                list_for_each_entry_safe(page, tmp,
                                         &data->trace_pages, lru) {
                        list_del_init(&page->lru);

fs_initcall(tracer_alloc_buffers);