2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/kprobes.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
43 #include <linux/sched/rt.h>
46 #include "trace_output.h"
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
52 bool ring_buffer_expanded;
55 * We need to change this state when a selftest is running.
56 * A selftest will look into the ring buffer to count the
57 * entries inserted during the selftest, although concurrent
58 * insertions into the ring buffer, such as trace_printk(), could occur
59 * at the same time, giving false positive or negative results.
61 static bool __read_mostly tracing_selftest_running;
64 * If a tracer is running, we do not want to run SELFTEST.
66 bool __read_mostly tracing_selftest_disabled;
68 /* Pipe tracepoints to printk */
69 struct trace_iterator *tracepoint_print_iter;
70 int tracepoint_printk;
72 /* For tracers that don't implement custom flags */
73 static struct tracer_opt dummy_tracer_opt[] = {
77 static struct tracer_flags dummy_tracer_flags = {
79 .opts = dummy_tracer_opt
83 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
89 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event occurred.
93 static DEFINE_PER_CPU(bool, trace_cmdline_save);
96 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets this back to zero.
101 static int tracing_disabled = 1;
103 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
105 cpumask_var_t __read_mostly tracing_buffer_mask;
108 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
110 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
111 * is set, then ftrace_dump is called. This will output the contents
112 * of the ftrace buffers to the console. This is very useful for
113 * capturing traces that lead to crashes.
116 * It is off by default, but you can enable it either by specifying
117 * "ftrace_dump_on_oops" on the kernel command line, or by setting
118 * /proc/sys/kernel/ftrace_dump_on_oops
119 * Set it to 1 to dump the buffers of all CPUs
120 * Set it to 2 to dump only the buffer of the CPU that triggered the oops
123 enum ftrace_dump_mode ftrace_dump_on_oops;
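/*
 * Example (illustrative userspace sketch, not part of this file): the comment
 * above documents the sysctl knob. A minimal program that requests a dump of
 * all CPU buffers on oops could look like:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/ftrace_dump_on_oops", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1", f);	// "2" would dump only the oopsing CPU's buffer
 *		fclose(f);
 *		return 0;
 *	}
 */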
125 /* When set, tracing will stop when a WARN*() is hit */
126 int __disable_trace_on_warning;
128 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
129 /* Map of enums to their values, for "enum_map" file */
130 struct trace_enum_map_head {
132 unsigned long length;
135 union trace_enum_map_item;
137 struct trace_enum_map_tail {
139 * "end" is first and points to NULL as it must be different
140 * from "mod" or "enum_string"
142 union trace_enum_map_item *next;
143 const char *end; /* points to NULL */
146 static DEFINE_MUTEX(trace_enum_mutex);
149 * The trace_enum_maps are saved in an array with two extra elements,
150 * one at the beginning, and one at the end. The beginning item contains
151 * the count of the saved maps (head.length), and the module they
152 * belong to if not built in (head.mod). The ending item contains a
153 * pointer to the next array of saved enum_map items.
155 union trace_enum_map_item {
156 struct trace_enum_map map;
157 struct trace_enum_map_head head;
158 struct trace_enum_map_tail tail;
161 static union trace_enum_map_item *trace_enum_maps;
162 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
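/*
 * Illustrative sketch (not part of this file): given the layout described
 * above, a consumer of the saved maps would start at the head item, use
 * head.length to know how many map items follow, and then chase tail.next
 * to the next saved array. Roughly:
 *
 *	union trace_enum_map_item *ptr = trace_enum_maps;
 *
 *	while (ptr) {
 *		unsigned long i, len = ptr->head.length;
 *
 *		ptr++;				// skip over the head item
 *		for (i = 0; i < len; i++, ptr++)
 *			use_map(&ptr->map);	// hypothetical consumer
 *		ptr = ptr->tail.next;		// next saved array, or NULL
 *	}
 */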
164 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
166 #define MAX_TRACER_SIZE 100
167 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
168 static char *default_bootup_tracer;
170 static bool allocate_snapshot;
172 static int __init set_cmdline_ftrace(char *str)
174 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
175 default_bootup_tracer = bootup_tracer_buf;
176 /* We are using ftrace early, expand it */
177 ring_buffer_expanded = true;
180 __setup("ftrace=", set_cmdline_ftrace);
182 static int __init set_ftrace_dump_on_oops(char *str)
184 if (*str++ != '=' || !*str) {
185 ftrace_dump_on_oops = DUMP_ALL;
189 if (!strcmp("orig_cpu", str)) {
190 ftrace_dump_on_oops = DUMP_ORIG;
196 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
198 static int __init stop_trace_on_warning(char *str)
200 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
201 __disable_trace_on_warning = 1;
204 __setup("traceoff_on_warning", stop_trace_on_warning);
206 static int __init boot_alloc_snapshot(char *str)
208 allocate_snapshot = true;
209 /* We also need the main ring buffer expanded */
210 ring_buffer_expanded = true;
213 __setup("alloc_snapshot", boot_alloc_snapshot);
216 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217 static char *trace_boot_options __initdata;
219 static int __init set_trace_boot_options(char *str)
221 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
222 trace_boot_options = trace_boot_options_buf;
225 __setup("trace_options=", set_trace_boot_options);
227 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
228 static char *trace_boot_clock __initdata;
230 static int __init set_trace_boot_clock(char *str)
232 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
233 trace_boot_clock = trace_boot_clock_buf;
236 __setup("trace_clock=", set_trace_boot_clock);
238 static int __init set_tracepoint_printk(char *str)
240 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
241 tracepoint_printk = 1;
244 __setup("tp_printk", set_tracepoint_printk);
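/*
 * Example (illustrative): all of the __setup() handlers above are driven from
 * the kernel command line. A boot line that starts the function tracer early,
 * allocates the snapshot buffer, and pipes tracepoints to printk might look
 * like (options shown are just a sample):
 *
 *	ftrace=function trace_options=sym-offset alloc_snapshot
 *		trace_clock=global ftrace_dump_on_oops=orig_cpu tp_printk
 */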
246 unsigned long long ns2usecs(cycle_t nsec)
253 /* trace_flags holds trace_options default values */
254 #define TRACE_DEFAULT_FLAGS \
255 (FUNCTION_DEFAULT_FLAGS | \
256 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
257 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
258 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
259 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
261 /* trace_options that are only supported by global_trace */
262 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
263 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
267 * The global_trace is the descriptor that holds the tracing
268 * buffers for the live tracing. For each CPU, it contains
269 * a linked list of pages that will store trace entries. The
270 * page descriptor of the pages in the memory is used to hold
271 * the linked list by linking the lru item in the page descriptor
272 * to each of the pages in the buffer per CPU.
274 * For each active CPU there is a data field that holds the
275 * pages for the buffer for that CPU. Each CPU has the same number
276 * of pages allocated for its buffer.
278 static struct trace_array global_trace = {
279 .trace_flags = TRACE_DEFAULT_FLAGS,
282 LIST_HEAD(ftrace_trace_arrays);
284 int trace_array_get(struct trace_array *this_tr)
286 struct trace_array *tr;
289 mutex_lock(&trace_types_lock);
290 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
297 mutex_unlock(&trace_types_lock);
302 static void __trace_array_put(struct trace_array *this_tr)
304 WARN_ON(!this_tr->ref);
308 void trace_array_put(struct trace_array *this_tr)
310 mutex_lock(&trace_types_lock);
311 __trace_array_put(this_tr);
312 mutex_unlock(&trace_types_lock);
315 int filter_check_discard(struct trace_event_file *file, void *rec,
316 struct ring_buffer *buffer,
317 struct ring_buffer_event *event)
319 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
320 !filter_match_preds(file->filter, rec)) {
321 ring_buffer_discard_commit(buffer, event);
327 EXPORT_SYMBOL_GPL(filter_check_discard);
329 int call_filter_check_discard(struct trace_event_call *call, void *rec,
330 struct ring_buffer *buffer,
331 struct ring_buffer_event *event)
333 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
334 !filter_match_preds(call->filter, rec)) {
335 ring_buffer_discard_commit(buffer, event);
341 EXPORT_SYMBOL_GPL(call_filter_check_discard);
343 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
347 /* Early boot up does not have a buffer yet */
349 return trace_clock_local();
351 ts = ring_buffer_time_stamp(buf->buffer, cpu);
352 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
357 cycle_t ftrace_now(int cpu)
359 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
363 * tracing_is_enabled - Show if global_trace has been disabled
365 * Shows if the global trace has been enabled or not. It uses the
366 * mirror flag "buffer_disabled", which can be checked in fast paths such as
367 * in the irqsoff tracer. But it may be inaccurate due to races. If you
368 * need to know the accurate state, use tracing_is_on() which is a little
369 * slower, but accurate.
371 int tracing_is_enabled(void)
374 * For quick access (irqsoff uses this in fast path), just
375 * return the mirror variable of the state of the ring buffer.
376 * It's a little racy, but we don't really care.
379 return !global_trace.buffer_disabled;
383 * trace_buf_size is the size in bytes that is allocated
384 * for a buffer. Note, the number of bytes is always rounded
387 * This number is purposely set to a low value of 16384.
388 * If a dump on oops happens, it is much appreciated
389 * not to have to wait for all that output. In any case, this is
390 * configurable at both boot time and run time.
392 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
394 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
396 /* trace_types holds a link list of available tracers. */
397 static struct tracer *trace_types __read_mostly;
400 * trace_types_lock is used to protect the trace_types list.
402 DEFINE_MUTEX(trace_types_lock);
405 * serialize the access of the ring buffer
407 * The ring buffer serializes readers, but that is only low-level protection.
408 * The validity of the events (which are returned by ring_buffer_peek() etc.)
409 * is not protected by the ring buffer.
411 * The content of events may become garbage if we allow other processes to
412 * consume these events concurrently:
413 * A) the page of the consumed events may become a normal page
414 * (not a reader page) in the ring buffer, and this page will be rewritten
415 * by the event producer.
416 * B) The page of the consumed events may become a page for splice_read,
417 * and this page will be returned to the system.
419 * These primitives allow multiple processes to access different cpu ring buffers concurrently.
422 * These primitives don't distinguish read-only and read-consume access.
423 * Multiple read-only accesses are also serialized.
427 static DECLARE_RWSEM(all_cpu_access_lock);
428 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
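/*
 * Illustrative sketch (not from this file): a reader path would typically
 * bracket its ring buffer accesses with these primitives, e.g.:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost_events);
 *	// ... examine or format the event ...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead of a single cpu takes the access
 * exclusively across all per-cpu buffers.
 */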
430 static inline void trace_access_lock(int cpu)
432 if (cpu == RING_BUFFER_ALL_CPUS) {
433 /* gain it for accessing the whole ring buffer. */
434 down_write(&all_cpu_access_lock);
436 /* gain it for accessing a cpu ring buffer. */
438 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
439 down_read(&all_cpu_access_lock);
441 /* Secondly block other access to this @cpu ring buffer. */
442 mutex_lock(&per_cpu(cpu_access_lock, cpu));
446 static inline void trace_access_unlock(int cpu)
448 if (cpu == RING_BUFFER_ALL_CPUS) {
449 up_write(&all_cpu_access_lock);
451 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
452 up_read(&all_cpu_access_lock);
456 static inline void trace_access_lock_init(void)
460 for_each_possible_cpu(cpu)
461 mutex_init(&per_cpu(cpu_access_lock, cpu));
466 static DEFINE_MUTEX(access_lock);
468 static inline void trace_access_lock(int cpu)
471 mutex_lock(&access_lock);
474 static inline void trace_access_unlock(int cpu)
477 mutex_unlock(&access_lock);
480 static inline void trace_access_lock_init(void)
486 #ifdef CONFIG_STACKTRACE
487 static void __ftrace_trace_stack(struct ring_buffer *buffer,
489 int skip, int pc, struct pt_regs *regs);
490 static inline void ftrace_trace_stack(struct trace_array *tr,
491 struct ring_buffer *buffer,
493 int skip, int pc, struct pt_regs *regs);
496 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
498 int skip, int pc, struct pt_regs *regs)
501 static inline void ftrace_trace_stack(struct trace_array *tr,
502 struct ring_buffer *buffer,
504 int skip, int pc, struct pt_regs *regs)
510 static void tracer_tracing_on(struct trace_array *tr)
512 if (tr->trace_buffer.buffer)
513 ring_buffer_record_on(tr->trace_buffer.buffer);
515 * This flag is looked at when buffers haven't been allocated
516 * yet, or by some tracers (like irqsoff) that just want to
517 * know if the ring buffer has been disabled, but it can handle
518 * races where it gets disabled while we still do a record.
519 * As the check is in the fast path of the tracers, it is more
520 * important to be fast than accurate.
522 tr->buffer_disabled = 0;
523 /* Make the flag seen by readers */
528 * tracing_on - enable tracing buffers
530 * This function enables tracing buffers that may have been
531 * disabled with tracing_off.
533 void tracing_on(void)
535 tracer_tracing_on(&global_trace);
537 EXPORT_SYMBOL_GPL(tracing_on);
540 * __trace_puts - write a constant string into the trace buffer.
541 * @ip: The address of the caller
542 * @str: The constant string to write
543 * @size: The size of the string.
545 int __trace_puts(unsigned long ip, const char *str, int size)
547 struct ring_buffer_event *event;
548 struct ring_buffer *buffer;
549 struct print_entry *entry;
550 unsigned long irq_flags;
554 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
557 pc = preempt_count();
559 if (unlikely(tracing_selftest_running || tracing_disabled))
562 alloc = sizeof(*entry) + size + 2; /* possible \n added */
564 local_save_flags(irq_flags);
565 buffer = global_trace.trace_buffer.buffer;
566 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
571 entry = ring_buffer_event_data(event);
574 memcpy(&entry->buf, str, size);
576 /* Add a newline if necessary */
577 if (entry->buf[size - 1] != '\n') {
578 entry->buf[size] = '\n';
579 entry->buf[size + 1] = '\0';
581 entry->buf[size] = '\0';
583 __buffer_unlock_commit(buffer, event);
584 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
588 EXPORT_SYMBOL_GPL(__trace_puts);
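/*
 * Example (illustrative, not from this file): __trace_puts() is normally
 * reached through the trace_puts() helper, which is handy for cheap, fixed
 * string markers in code being debugged, e.g.:
 *
 *	if (unlikely(too_many_retries))		// hypothetical condition
 *		trace_puts("retry threshold exceeded\n");
 *
 * The string lands in the ring buffer as a TRACE_PRINT entry, just like the
 * event reserved above.
 */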
591 * __trace_bputs - write the pointer to a constant string into trace buffer
592 * @ip: The address of the caller
593 * @str: The constant string to write to the buffer to
595 int __trace_bputs(unsigned long ip, const char *str)
597 struct ring_buffer_event *event;
598 struct ring_buffer *buffer;
599 struct bputs_entry *entry;
600 unsigned long irq_flags;
601 int size = sizeof(struct bputs_entry);
604 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
607 pc = preempt_count();
609 if (unlikely(tracing_selftest_running || tracing_disabled))
612 local_save_flags(irq_flags);
613 buffer = global_trace.trace_buffer.buffer;
614 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
619 entry = ring_buffer_event_data(event);
623 __buffer_unlock_commit(buffer, event);
624 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
628 EXPORT_SYMBOL_GPL(__trace_bputs);
630 #ifdef CONFIG_TRACER_SNAPSHOT
632 * trace_snapshot - take a snapshot of the current buffer.
634 * This causes a swap between the snapshot buffer and the current live
635 * tracing buffer. You can use this to take snapshots of the live
636 * trace when some condition is triggered, but continue to trace.
638 * Note, make sure to allocate the snapshot either with
639 * tracing_snapshot_alloc(), or manually with:
640 * echo 1 > /sys/kernel/debug/tracing/snapshot
642 * If the snapshot buffer is not allocated, this will stop tracing,
643 * basically making a permanent snapshot.
645 void tracing_snapshot(void)
647 struct trace_array *tr = &global_trace;
648 struct tracer *tracer = tr->current_trace;
652 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
653 internal_trace_puts("*** snapshot is being ignored ***\n");
657 if (!tr->allocated_snapshot) {
658 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
659 internal_trace_puts("*** stopping trace here! ***\n");
664 /* Note, snapshot can not be used when the tracer uses it */
665 if (tracer->use_max_tr) {
666 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
667 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
671 local_irq_save(flags);
672 update_max_tr(tr, current, smp_processor_id());
673 local_irq_restore(flags);
675 EXPORT_SYMBOL_GPL(tracing_snapshot);
677 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
678 struct trace_buffer *size_buf, int cpu_id);
679 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
681 static int alloc_snapshot(struct trace_array *tr)
685 if (!tr->allocated_snapshot) {
687 /* allocate spare buffer */
688 ret = resize_buffer_duplicate_size(&tr->max_buffer,
689 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
693 tr->allocated_snapshot = true;
699 static void free_snapshot(struct trace_array *tr)
702 * We don't free the ring buffer; instead, we resize it because
703 * the max_tr ring buffer has some state (e.g. ring->clock) and
704 * we want to preserve it.
706 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
707 set_buffer_entries(&tr->max_buffer, 1);
708 tracing_reset_online_cpus(&tr->max_buffer);
709 tr->allocated_snapshot = false;
713 * tracing_alloc_snapshot - allocate snapshot buffer.
715 * This only allocates the snapshot buffer if it isn't already
716 * allocated - it doesn't also take a snapshot.
718 * This is meant to be used in cases where the snapshot buffer needs
719 * to be set up for events that can't sleep but need to be able to
720 * trigger a snapshot.
722 int tracing_alloc_snapshot(void)
724 struct trace_array *tr = &global_trace;
727 ret = alloc_snapshot(tr);
732 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
735 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
737 * This is similar to trace_snapshot(), but it will allocate the
738 * snapshot buffer if it isn't already allocated. Use this only
739 * where it is safe to sleep, as the allocation may sleep.
741 * This causes a swap between the snapshot buffer and the current live
742 * tracing buffer. You can use this to take snapshots of the live
743 * trace when some condition is triggered, but continue to trace.
745 void tracing_snapshot_alloc(void)
749 ret = tracing_alloc_snapshot();
755 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
757 void tracing_snapshot(void)
759 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
761 EXPORT_SYMBOL_GPL(tracing_snapshot);
762 int tracing_alloc_snapshot(void)
764 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
767 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
768 void tracing_snapshot_alloc(void)
773 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
774 #endif /* CONFIG_TRACER_SNAPSHOT */
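/*
 * Example (illustrative, not from this file): a common pattern is to allocate
 * the snapshot buffer once from a context that may sleep, and then trigger
 * snapshots from atomic context when an interesting condition fires:
 *
 *	// at init time (may sleep)
 *	if (tracing_alloc_snapshot())
 *		pr_warn("snapshot buffer not available\n");
 *
 *	// later, e.g. in an interrupt handler (hypothetical condition)
 *	if (watchdog_fired)
 *		tracing_snapshot();
 *
 * From user space the same swap can be triggered with
 * echo 1 > /sys/kernel/debug/tracing/snapshot, as noted above.
 */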
776 static void tracer_tracing_off(struct trace_array *tr)
778 if (tr->trace_buffer.buffer)
779 ring_buffer_record_off(tr->trace_buffer.buffer);
781 * This flag is looked at when buffers haven't been allocated
782 * yet, or by some tracers (like irqsoff) that just want to
783 * know if the ring buffer has been disabled, but it can handle
784 * races where it gets disabled while we still do a record.
785 * As the check is in the fast path of the tracers, it is more
786 * important to be fast than accurate.
788 tr->buffer_disabled = 1;
789 /* Make the flag seen by readers */
794 * tracing_off - turn off tracing buffers
796 * This function stops the tracing buffers from recording data.
797 * It does not disable any overhead the tracers themselves may
798 * be causing. This function simply causes all recording to
799 * the ring buffers to fail.
801 void tracing_off(void)
803 tracer_tracing_off(&global_trace);
805 EXPORT_SYMBOL_GPL(tracing_off);
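/*
 * Example (illustrative, not from this file): tracing_off()/tracing_on() are
 * often used to freeze the ring buffer the moment a bug is detected, so the
 * events leading up to it are preserved:
 *
 *	if (WARN_ON(state == BAD_STATE)) {	// hypothetical check
 *		trace_printk("bad state hit, freezing trace\n");
 *		tracing_off();
 *	}
 *
 * Recording can be resumed later with tracing_on(), or from user space via
 * the tracing_on file in tracefs.
 */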
807 void disable_trace_on_warning(void)
809 if (__disable_trace_on_warning)
814 * tracer_tracing_is_on - show real state of ring buffer enabled
815 * @tr : the trace array to know if ring buffer is enabled
817 * Shows the real state of the ring buffer: whether it is enabled or not.
819 static int tracer_tracing_is_on(struct trace_array *tr)
821 if (tr->trace_buffer.buffer)
822 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
823 return !tr->buffer_disabled;
827 * tracing_is_on - show state of ring buffers enabled
829 int tracing_is_on(void)
831 return tracer_tracing_is_on(&global_trace);
833 EXPORT_SYMBOL_GPL(tracing_is_on);
835 static int __init set_buf_size(char *str)
837 unsigned long buf_size;
841 buf_size = memparse(str, &str);
842 /* nr_entries can not be zero */
845 trace_buf_size = buf_size;
848 __setup("trace_buf_size=", set_buf_size);
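/*
 * Example (illustrative): because set_buf_size() uses memparse(), the usual
 * size suffixes are accepted, e.g. booting with
 *
 *	trace_buf_size=2M
 *
 * requests a 2 MiB ring buffer (applied per cpu when the buffers are
 * allocated).
 */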
850 static int __init set_tracing_thresh(char *str)
852 unsigned long threshold;
857 ret = kstrtoul(str, 0, &threshold);
860 tracing_thresh = threshold * 1000;
863 __setup("tracing_thresh=", set_tracing_thresh);
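/*
 * Example (illustrative): the threshold is given in microseconds and stored
 * in nanoseconds, so booting with
 *
 *	tracing_thresh=100
 *
 * records only latencies longer than 100 usecs in the tracers that honor
 * tracing_thresh.
 */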
865 unsigned long nsecs_to_usecs(unsigned long nsecs)
871 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
872 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
873 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
874 * of strings in the order that the enums were defined.
879 /* These must match the bit positions in trace_iterator_flags */
880 static const char *trace_options[] = {
888 int in_ns; /* is this clock in nanoseconds? */
890 { trace_clock_local, "local", 1 },
891 { trace_clock_global, "global", 1 },
892 { trace_clock_counter, "counter", 0 },
893 { trace_clock_jiffies, "uptime", 0 },
894 { trace_clock, "perf", 1 },
895 { ktime_get_mono_fast_ns, "mono", 1 },
896 { ktime_get_raw_fast_ns, "mono_raw", 1 },
901 * trace_parser_get_init - gets the buffer for trace parser
903 int trace_parser_get_init(struct trace_parser *parser, int size)
905 memset(parser, 0, sizeof(*parser));
907 parser->buffer = kmalloc(size, GFP_KERNEL);
916 * trace_parser_put - frees the buffer for trace parser
918 void trace_parser_put(struct trace_parser *parser)
920 kfree(parser->buffer);
924 * trace_get_user - reads the user input string separated by space
925 * (matched by isspace(ch))
927 * For each string found the 'struct trace_parser' is updated,
928 * and the function returns.
930 * Returns number of bytes read.
932 * See kernel/trace/trace.h for 'struct trace_parser' details.
934 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
935 size_t cnt, loff_t *ppos)
942 trace_parser_clear(parser);
944 ret = get_user(ch, ubuf++);
952 * The parser is not finished with the last write,
953 * continue reading the user input without skipping spaces.
956 /* skip white space */
957 while (cnt && isspace(ch)) {
958 ret = get_user(ch, ubuf++);
965 /* only spaces were written */
975 /* read the non-space input */
976 while (cnt && !isspace(ch)) {
977 if (parser->idx < parser->size - 1)
978 parser->buffer[parser->idx++] = ch;
983 ret = get_user(ch, ubuf++);
990 /* We either got finished input or we have to wait for another call. */
992 parser->buffer[parser->idx] = 0;
993 parser->cont = false;
994 } else if (parser->idx < parser->size - 1) {
996 parser->buffer[parser->idx++] = ch;
1009 /* TODO add a seq_buf_to_buffer() */
1010 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1014 if (trace_seq_used(s) <= s->seq.readpos)
1017 len = trace_seq_used(s) - s->seq.readpos;
1020 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1022 s->seq.readpos += cnt;
1026 unsigned long __read_mostly tracing_thresh;
1028 #ifdef CONFIG_TRACER_MAX_TRACE
1030 * Copy the new maximum trace into the separate maximum-trace
1031 * structure. (this way the maximum trace is permanently saved,
1032 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1035 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1037 struct trace_buffer *trace_buf = &tr->trace_buffer;
1038 struct trace_buffer *max_buf = &tr->max_buffer;
1039 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1040 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1043 max_buf->time_start = data->preempt_timestamp;
1045 max_data->saved_latency = tr->max_latency;
1046 max_data->critical_start = data->critical_start;
1047 max_data->critical_end = data->critical_end;
1049 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1050 max_data->pid = tsk->pid;
1052 * If tsk == current, then use current_uid(), as that does not use
1053 * RCU. The irq tracer can be called out of RCU scope.
1056 max_data->uid = current_uid();
1058 max_data->uid = task_uid(tsk);
1060 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1061 max_data->policy = tsk->policy;
1062 max_data->rt_priority = tsk->rt_priority;
1064 /* record this tasks comm */
1065 tracing_record_cmdline(tsk);
1069 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1071 * @tsk: the task with the latency
1072 * @cpu: The cpu that initiated the trace.
1074 * Flip the buffers between the @tr and the max_tr and record information
1075 * about which task was the cause of this latency.
1078 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1080 struct ring_buffer *buf;
1085 WARN_ON_ONCE(!irqs_disabled());
1087 if (!tr->allocated_snapshot) {
1088 /* Only the nop tracer should hit this when disabling */
1089 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1093 arch_spin_lock(&tr->max_lock);
1095 buf = tr->trace_buffer.buffer;
1096 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1097 tr->max_buffer.buffer = buf;
1099 __update_max_tr(tr, tsk, cpu);
1100 arch_spin_unlock(&tr->max_lock);
1104 * update_max_tr_single - only copy one trace over, and reset the rest
1106 * @tsk - task with the latency
1107 * @cpu - the cpu of the buffer to copy.
1109 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1112 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1119 WARN_ON_ONCE(!irqs_disabled());
1120 if (!tr->allocated_snapshot) {
1121 /* Only the nop tracer should hit this when disabling */
1122 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1126 arch_spin_lock(&tr->max_lock);
1128 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1130 if (ret == -EBUSY) {
1132 * We failed to swap the buffer due to a commit taking
1133 * place on this CPU. We fail to record, but we reset
1134 * the max trace buffer (no one writes directly to it)
1135 * and flag that it failed.
1137 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1138 "Failed to swap buffers due to commit in progress\n");
1141 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1143 __update_max_tr(tr, tsk, cpu);
1144 arch_spin_unlock(&tr->max_lock);
1146 #endif /* CONFIG_TRACER_MAX_TRACE */
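/*
 * Illustrative sketch (not from this file): the latency tracers use the
 * helpers above when a new maximum is observed, roughly like:
 *
 *	if (delta > tr->max_latency) {		// new worst case seen
 *		tr->max_latency = delta;
 *		update_max_tr(tr, current, smp_processor_id());
 *	}
 *
 * which swaps the live buffer with max_buffer so the worst-case trace is
 * preserved for later reading.
 */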
1148 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1150 /* Iterators are static, they should be filled or empty */
1151 if (trace_buffer_iter(iter, iter->cpu_file))
1154 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1158 #ifdef CONFIG_FTRACE_STARTUP_TEST
1159 static int run_tracer_selftest(struct tracer *type)
1161 struct trace_array *tr = &global_trace;
1162 struct tracer *saved_tracer = tr->current_trace;
1165 if (!type->selftest || tracing_selftest_disabled)
1169 * Run a selftest on this tracer.
1170 * Here we reset the trace buffer, and set the current
1171 * tracer to be this tracer. The tracer can then run some
1172 * internal tracing to verify that everything is in order.
1173 * If we fail, we do not register this tracer.
1175 tracing_reset_online_cpus(&tr->trace_buffer);
1177 tr->current_trace = type;
1179 #ifdef CONFIG_TRACER_MAX_TRACE
1180 if (type->use_max_tr) {
1181 /* If we expanded the buffers, make sure the max is expanded too */
1182 if (ring_buffer_expanded)
1183 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1184 RING_BUFFER_ALL_CPUS);
1185 tr->allocated_snapshot = true;
1189 /* the test is responsible for initializing and enabling */
1190 pr_info("Testing tracer %s: ", type->name);
1191 ret = type->selftest(type, tr);
1192 /* the test is responsible for resetting too */
1193 tr->current_trace = saved_tracer;
1195 printk(KERN_CONT "FAILED!\n");
1196 /* Add the warning after printing 'FAILED' */
1200 /* Only reset on passing, to avoid touching corrupted buffers */
1201 tracing_reset_online_cpus(&tr->trace_buffer);
1203 #ifdef CONFIG_TRACER_MAX_TRACE
1204 if (type->use_max_tr) {
1205 tr->allocated_snapshot = false;
1207 /* Shrink the max buffer again */
1208 if (ring_buffer_expanded)
1209 ring_buffer_resize(tr->max_buffer.buffer, 1,
1210 RING_BUFFER_ALL_CPUS);
1214 printk(KERN_CONT "PASSED\n");
1218 static inline int run_tracer_selftest(struct tracer *type)
1222 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1224 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1227 * register_tracer - register a tracer with the ftrace system.
1228 * @type - the plugin for the tracer
1230 * Register a new plugin tracer.
1232 int register_tracer(struct tracer *type)
1238 pr_info("Tracer must have a name\n");
1242 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1243 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1247 mutex_lock(&trace_types_lock);
1249 tracing_selftest_running = true;
1251 for (t = trace_types; t; t = t->next) {
1252 if (strcmp(type->name, t->name) == 0) {
1254 pr_info("Tracer %s already registered\n",
1261 if (!type->set_flag)
1262 type->set_flag = &dummy_set_flag;
1264 type->flags = &dummy_tracer_flags;
1266 if (!type->flags->opts)
1267 type->flags->opts = dummy_tracer_opt;
1269 ret = run_tracer_selftest(type);
1273 type->next = trace_types;
1275 add_tracer_options(&global_trace, type);
1278 tracing_selftest_running = false;
1279 mutex_unlock(&trace_types_lock);
1281 if (ret || !default_bootup_tracer)
1284 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1287 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1288 /* Do we want this tracer to start on bootup? */
1289 tracing_set_tracer(&global_trace, type->name);
1290 default_bootup_tracer = NULL;
1291 /* disable other selftests, since this will break it. */
1292 tracing_selftest_disabled = true;
1293 #ifdef CONFIG_FTRACE_STARTUP_TEST
1294 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1302 void tracing_reset(struct trace_buffer *buf, int cpu)
1304 struct ring_buffer *buffer = buf->buffer;
1309 ring_buffer_record_disable(buffer);
1311 /* Make sure all commits have finished */
1312 synchronize_sched();
1313 ring_buffer_reset_cpu(buffer, cpu);
1315 ring_buffer_record_enable(buffer);
1318 void tracing_reset_online_cpus(struct trace_buffer *buf)
1320 struct ring_buffer *buffer = buf->buffer;
1326 ring_buffer_record_disable(buffer);
1328 /* Make sure all commits have finished */
1329 synchronize_sched();
1331 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1333 for_each_online_cpu(cpu)
1334 ring_buffer_reset_cpu(buffer, cpu);
1336 ring_buffer_record_enable(buffer);
1339 /* Must have trace_types_lock held */
1340 void tracing_reset_all_online_cpus(void)
1342 struct trace_array *tr;
1344 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1345 tracing_reset_online_cpus(&tr->trace_buffer);
1346 #ifdef CONFIG_TRACER_MAX_TRACE
1347 tracing_reset_online_cpus(&tr->max_buffer);
1352 #define SAVED_CMDLINES_DEFAULT 128
1353 #define NO_CMDLINE_MAP UINT_MAX
1354 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1355 struct saved_cmdlines_buffer {
1356 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1357 unsigned *map_cmdline_to_pid;
1358 unsigned cmdline_num;
1360 char *saved_cmdlines;
1362 static struct saved_cmdlines_buffer *savedcmd;
1364 /* temporarily disable recording */
1365 static atomic_t trace_record_cmdline_disabled __read_mostly;
1367 static inline char *get_saved_cmdlines(int idx)
1369 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1372 static inline void set_cmdline(int idx, const char *cmdline)
1374 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1377 static int allocate_cmdlines_buffer(unsigned int val,
1378 struct saved_cmdlines_buffer *s)
1380 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1382 if (!s->map_cmdline_to_pid)
1385 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1386 if (!s->saved_cmdlines) {
1387 kfree(s->map_cmdline_to_pid);
1392 s->cmdline_num = val;
1393 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1394 sizeof(s->map_pid_to_cmdline));
1395 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1396 val * sizeof(*s->map_cmdline_to_pid));
1401 static int trace_create_savedcmd(void)
1405 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1409 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1419 int is_tracing_stopped(void)
1421 return global_trace.stop_count;
1425 * tracing_start - quick start of the tracer
1427 * If tracing is enabled but was stopped by tracing_stop,
1428 * this will start the tracer back up.
1430 void tracing_start(void)
1432 struct ring_buffer *buffer;
1433 unsigned long flags;
1435 if (tracing_disabled)
1438 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1439 if (--global_trace.stop_count) {
1440 if (global_trace.stop_count < 0) {
1441 /* Someone screwed up their debugging */
1443 global_trace.stop_count = 0;
1448 /* Prevent the buffers from switching */
1449 arch_spin_lock(&global_trace.max_lock);
1451 buffer = global_trace.trace_buffer.buffer;
1453 ring_buffer_record_enable(buffer);
1455 #ifdef CONFIG_TRACER_MAX_TRACE
1456 buffer = global_trace.max_buffer.buffer;
1458 ring_buffer_record_enable(buffer);
1461 arch_spin_unlock(&global_trace.max_lock);
1464 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1467 static void tracing_start_tr(struct trace_array *tr)
1469 struct ring_buffer *buffer;
1470 unsigned long flags;
1472 if (tracing_disabled)
1475 /* If global, we need to also start the max tracer */
1476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1477 return tracing_start();
1479 raw_spin_lock_irqsave(&tr->start_lock, flags);
1481 if (--tr->stop_count) {
1482 if (tr->stop_count < 0) {
1483 /* Someone screwed up their debugging */
1490 buffer = tr->trace_buffer.buffer;
1492 ring_buffer_record_enable(buffer);
1495 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1499 * tracing_stop - quick stop of the tracer
1501 * Light weight way to stop tracing. Use in conjunction with
1504 void tracing_stop(void)
1506 struct ring_buffer *buffer;
1507 unsigned long flags;
1509 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1510 if (global_trace.stop_count++)
1513 /* Prevent the buffers from switching */
1514 arch_spin_lock(&global_trace.max_lock);
1516 buffer = global_trace.trace_buffer.buffer;
1518 ring_buffer_record_disable(buffer);
1520 #ifdef CONFIG_TRACER_MAX_TRACE
1521 buffer = global_trace.max_buffer.buffer;
1523 ring_buffer_record_disable(buffer);
1526 arch_spin_unlock(&global_trace.max_lock);
1529 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1532 static void tracing_stop_tr(struct trace_array *tr)
1534 struct ring_buffer *buffer;
1535 unsigned long flags;
1537 /* If global, we need to also stop the max tracer */
1538 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1539 return tracing_stop();
1541 raw_spin_lock_irqsave(&tr->start_lock, flags);
1542 if (tr->stop_count++)
1545 buffer = tr->trace_buffer.buffer;
1547 ring_buffer_record_disable(buffer);
1550 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1553 void trace_stop_cmdline_recording(void);
1555 static int trace_save_cmdline(struct task_struct *tsk)
1559 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1563 * It's not the end of the world if we don't get
1564 * the lock, but we also don't want to spin
1565 * nor do we want to disable interrupts,
1566 * so if we miss here, then better luck next time.
1568 if (!arch_spin_trylock(&trace_cmdline_lock))
1571 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1572 if (idx == NO_CMDLINE_MAP) {
1573 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1576 * Check whether the cmdline buffer at idx has a pid
1577 * mapped. We are going to overwrite that entry so we
1578 * need to clear the map_pid_to_cmdline. Otherwise we
1579 * would read the new comm for the old pid.
1581 pid = savedcmd->map_cmdline_to_pid[idx];
1582 if (pid != NO_CMDLINE_MAP)
1583 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1585 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1586 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1588 savedcmd->cmdline_idx = idx;
1591 set_cmdline(idx, tsk->comm);
1593 arch_spin_unlock(&trace_cmdline_lock);
1598 static void __trace_find_cmdline(int pid, char comm[])
1603 strcpy(comm, "<idle>");
1607 if (WARN_ON_ONCE(pid < 0)) {
1608 strcpy(comm, "<XXX>");
1612 if (pid > PID_MAX_DEFAULT) {
1613 strcpy(comm, "<...>");
1617 map = savedcmd->map_pid_to_cmdline[pid];
1618 if (map != NO_CMDLINE_MAP)
1619 strcpy(comm, get_saved_cmdlines(map));
1621 strcpy(comm, "<...>");
1624 void trace_find_cmdline(int pid, char comm[])
1627 arch_spin_lock(&trace_cmdline_lock);
1629 __trace_find_cmdline(pid, comm);
1631 arch_spin_unlock(&trace_cmdline_lock);
1635 void tracing_record_cmdline(struct task_struct *tsk)
1637 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1640 if (!__this_cpu_read(trace_cmdline_save))
1643 if (trace_save_cmdline(tsk))
1644 __this_cpu_write(trace_cmdline_save, false);
1648 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1651 struct task_struct *tsk = current;
1653 entry->preempt_count = pc & 0xff;
1654 entry->pid = (tsk) ? tsk->pid : 0;
1656 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1657 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1659 TRACE_FLAG_IRQS_NOSUPPORT |
1661 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1662 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1663 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1664 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1666 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1668 struct ring_buffer_event *
1669 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1672 unsigned long flags, int pc)
1674 struct ring_buffer_event *event;
1676 event = ring_buffer_lock_reserve(buffer, len);
1677 if (event != NULL) {
1678 struct trace_entry *ent = ring_buffer_event_data(event);
1680 tracing_generic_entry_update(ent, flags, pc);
1688 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1690 __this_cpu_write(trace_cmdline_save, true);
1691 ring_buffer_unlock_commit(buffer, event);
1694 void trace_buffer_unlock_commit(struct trace_array *tr,
1695 struct ring_buffer *buffer,
1696 struct ring_buffer_event *event,
1697 unsigned long flags, int pc)
1699 __buffer_unlock_commit(buffer, event);
1701 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
1702 ftrace_trace_userstack(buffer, flags, pc);
1704 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1706 static struct ring_buffer *temp_buffer;
1708 struct ring_buffer_event *
1709 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1710 struct trace_event_file *trace_file,
1711 int type, unsigned long len,
1712 unsigned long flags, int pc)
1714 struct ring_buffer_event *entry;
1716 *current_rb = trace_file->tr->trace_buffer.buffer;
1717 entry = trace_buffer_lock_reserve(*current_rb,
1718 type, len, flags, pc);
1720 * If tracing is off, but we have triggers enabled,
1721 * we still need to look at the event data. Use the temp_buffer
1722 * to store the trace event for the trigger to use. It's recursion
1723 * safe and will not be recorded anywhere.
1725 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
1726 *current_rb = temp_buffer;
1727 entry = trace_buffer_lock_reserve(*current_rb,
1728 type, len, flags, pc);
1732 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1734 struct ring_buffer_event *
1735 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1736 int type, unsigned long len,
1737 unsigned long flags, int pc)
1739 *current_rb = global_trace.trace_buffer.buffer;
1740 return trace_buffer_lock_reserve(*current_rb,
1741 type, len, flags, pc);
1743 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1745 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1746 struct ring_buffer *buffer,
1747 struct ring_buffer_event *event,
1748 unsigned long flags, int pc,
1749 struct pt_regs *regs)
1751 __buffer_unlock_commit(buffer, event);
1753 ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
1754 ftrace_trace_userstack(buffer, flags, pc);
1756 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1758 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1759 struct ring_buffer_event *event)
1761 ring_buffer_discard_commit(buffer, event);
1763 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1766 trace_function(struct trace_array *tr,
1767 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1770 struct trace_event_call *call = &event_function;
1771 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1772 struct ring_buffer_event *event;
1773 struct ftrace_entry *entry;
1775 /* If we are reading the ring buffer, don't trace */
1776 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1779 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1783 entry = ring_buffer_event_data(event);
1785 entry->parent_ip = parent_ip;
1787 if (!call_filter_check_discard(call, entry, buffer, event))
1788 __buffer_unlock_commit(buffer, event);
1791 #ifdef CONFIG_STACKTRACE
1793 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1794 struct ftrace_stack {
1795 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1798 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1799 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1801 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1802 unsigned long flags,
1803 int skip, int pc, struct pt_regs *regs)
1805 struct trace_event_call *call = &event_kernel_stack;
1806 struct ring_buffer_event *event;
1807 struct stack_entry *entry;
1808 struct stack_trace trace;
1810 int size = FTRACE_STACK_ENTRIES;
1812 trace.nr_entries = 0;
1816 * Since events can happen in NMIs there's no safe way to
1817 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1818 * or NMI comes in, it will just have to use the default
1819 * FTRACE_STACK_SIZE.
1821 preempt_disable_notrace();
1823 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1825 * We don't need any atomic variables, just a barrier.
1826 * If an interrupt comes in, we don't care, because it would
1827 * have exited and put the counter back to what we want.
1828 * We just need a barrier to keep gcc from moving things around.
1832 if (use_stack == 1) {
1833 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1834 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1837 save_stack_trace_regs(regs, &trace);
1839 save_stack_trace(&trace);
1841 if (trace.nr_entries > size)
1842 size = trace.nr_entries;
1844 /* From now on, use_stack is a boolean */
1847 size *= sizeof(unsigned long);
1849 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1850 sizeof(*entry) + size, flags, pc);
1853 entry = ring_buffer_event_data(event);
1855 memset(&entry->caller, 0, size);
1858 memcpy(&entry->caller, trace.entries,
1859 trace.nr_entries * sizeof(unsigned long));
1861 trace.max_entries = FTRACE_STACK_ENTRIES;
1862 trace.entries = entry->caller;
1864 save_stack_trace_regs(regs, &trace);
1866 save_stack_trace(&trace);
1869 entry->size = trace.nr_entries;
1871 if (!call_filter_check_discard(call, entry, buffer, event))
1872 __buffer_unlock_commit(buffer, event);
1875 /* Again, don't let gcc optimize things here */
1877 __this_cpu_dec(ftrace_stack_reserve);
1878 preempt_enable_notrace();
1882 static inline void ftrace_trace_stack(struct trace_array *tr,
1883 struct ring_buffer *buffer,
1884 unsigned long flags,
1885 int skip, int pc, struct pt_regs *regs)
1887 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
1890 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1893 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1896 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1900 * trace_dump_stack - record a stack back trace in the trace buffer
1901 * @skip: Number of functions to skip (helper handlers)
1903 void trace_dump_stack(int skip)
1905 unsigned long flags;
1907 if (tracing_disabled || tracing_selftest_running)
1910 local_save_flags(flags);
1913 * Skip 3 more, seems to get us at the caller of this function.
1917 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1918 flags, skip, preempt_count(), NULL);
1921 static DEFINE_PER_CPU(int, user_stack_count);
1924 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1926 struct trace_event_call *call = &event_user_stack;
1927 struct ring_buffer_event *event;
1928 struct userstack_entry *entry;
1929 struct stack_trace trace;
1931 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
1935 * NMIs can not handle page faults, even with fixups.
1936 * Saving the user stack can (and often does) fault.
1938 if (unlikely(in_nmi()))
1942 * prevent recursion, since the user stack tracing may
1943 * trigger other kernel events.
1946 if (__this_cpu_read(user_stack_count))
1949 __this_cpu_inc(user_stack_count);
1951 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1952 sizeof(*entry), flags, pc);
1954 goto out_drop_count;
1955 entry = ring_buffer_event_data(event);
1957 entry->tgid = current->tgid;
1958 memset(&entry->caller, 0, sizeof(entry->caller));
1960 trace.nr_entries = 0;
1961 trace.max_entries = FTRACE_STACK_ENTRIES;
1963 trace.entries = entry->caller;
1965 save_stack_trace_user(&trace);
1966 if (!call_filter_check_discard(call, entry, buffer, event))
1967 __buffer_unlock_commit(buffer, event);
1970 __this_cpu_dec(user_stack_count);
1976 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1978 ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
1982 #endif /* CONFIG_STACKTRACE */
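/*
 * Example (illustrative, not from this file): trace_dump_stack() gives a
 * quick way to record who reached a given spot without printing to the
 * console, e.g.:
 *
 *	if (unlikely(refcount_underflow))	// hypothetical condition
 *		trace_dump_stack(0);
 *
 * The @skip argument trims helper frames off the top of the recorded trace.
 */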
1984 /* created for use with alloc_percpu */
1985 struct trace_buffer_struct {
1986 char buffer[TRACE_BUF_SIZE];
1989 static struct trace_buffer_struct *trace_percpu_buffer;
1990 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1991 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1992 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1995 * The buffer used is dependent on the context. There is a per cpu
1996 * buffer for normal context, softirq context, hard irq context and
1997 * for NMI context. This allows for lockless recording.
1999 * Note, if the buffers failed to be allocated, then this returns NULL
2001 static char *get_trace_buf(void)
2003 struct trace_buffer_struct *percpu_buffer;
2006 * If we have allocated per cpu buffers, then we do not
2007 * need to do any locking.
2010 percpu_buffer = trace_percpu_nmi_buffer;
2012 percpu_buffer = trace_percpu_irq_buffer;
2013 else if (in_softirq())
2014 percpu_buffer = trace_percpu_sirq_buffer;
2016 percpu_buffer = trace_percpu_buffer;
2021 return this_cpu_ptr(&percpu_buffer->buffer[0]);
2024 static int alloc_percpu_trace_buffer(void)
2026 struct trace_buffer_struct *buffers;
2027 struct trace_buffer_struct *sirq_buffers;
2028 struct trace_buffer_struct *irq_buffers;
2029 struct trace_buffer_struct *nmi_buffers;
2031 buffers = alloc_percpu(struct trace_buffer_struct);
2035 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2039 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2043 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2047 trace_percpu_buffer = buffers;
2048 trace_percpu_sirq_buffer = sirq_buffers;
2049 trace_percpu_irq_buffer = irq_buffers;
2050 trace_percpu_nmi_buffer = nmi_buffers;
2055 free_percpu(irq_buffers);
2057 free_percpu(sirq_buffers);
2059 free_percpu(buffers);
2061 WARN(1, "Could not allocate percpu trace_printk buffer");
2065 static int buffers_allocated;
2067 void trace_printk_init_buffers(void)
2069 if (buffers_allocated)
2072 if (alloc_percpu_trace_buffer())
2075 /* trace_printk() is for debug use only. Don't use it in production. */
2078 pr_warning("**********************************************************\n");
2079 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2080 pr_warning("** **\n");
2081 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2082 pr_warning("** **\n");
2083 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2084 pr_warning("** unsafe for production use. **\n");
2085 pr_warning("** **\n");
2086 pr_warning("** If you see this message and you are not debugging **\n");
2087 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2088 pr_warning("** **\n");
2089 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2090 pr_warning("**********************************************************\n");
2092 /* Expand the buffers to set size */
2093 tracing_update_buffers();
2095 buffers_allocated = 1;
2098 * trace_printk_init_buffers() can be called by modules.
2099 * If that happens, then we need to start cmdline recording
2100 * directly here. If the global_trace.buffer is already
2101 * allocated here, then this was called by module code.
2103 if (global_trace.trace_buffer.buffer)
2104 tracing_start_cmdline_record();
2107 void trace_printk_start_comm(void)
2109 /* Start tracing comms if trace printk is set */
2110 if (!buffers_allocated)
2112 tracing_start_cmdline_record();
2115 static void trace_printk_start_stop_comm(int enabled)
2117 if (!buffers_allocated)
2121 tracing_start_cmdline_record();
2123 tracing_stop_cmdline_record();
2127 * trace_vbprintk - write binary msg to tracing buffer
2130 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2132 struct trace_event_call *call = &event_bprint;
2133 struct ring_buffer_event *event;
2134 struct ring_buffer *buffer;
2135 struct trace_array *tr = &global_trace;
2136 struct bprint_entry *entry;
2137 unsigned long flags;
2139 int len = 0, size, pc;
2141 if (unlikely(tracing_selftest_running || tracing_disabled))
2144 /* Don't pollute graph traces with trace_vprintk internals */
2145 pause_graph_tracing();
2147 pc = preempt_count();
2148 preempt_disable_notrace();
2150 tbuffer = get_trace_buf();
2156 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2158 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2161 local_save_flags(flags);
2162 size = sizeof(*entry) + sizeof(u32) * len;
2163 buffer = tr->trace_buffer.buffer;
2164 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2168 entry = ring_buffer_event_data(event);
2172 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2173 if (!call_filter_check_discard(call, entry, buffer, event)) {
2174 __buffer_unlock_commit(buffer, event);
2175 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2179 preempt_enable_notrace();
2180 unpause_graph_tracing();
2184 EXPORT_SYMBOL_GPL(trace_vbprintk);
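/*
 * Example (illustrative, not from this file): trace_vbprintk() is the backend
 * used when trace_printk() is called with a constant format, e.g.:
 *
 *	trace_printk("queue %d depth now %u\n", qid, depth);	// hypothetical
 *
 * Only the format pointer and the binary arguments are stored in the ring
 * buffer; the string is formatted when the trace is read back.
 */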
2187 __trace_array_vprintk(struct ring_buffer *buffer,
2188 unsigned long ip, const char *fmt, va_list args)
2190 struct trace_event_call *call = &event_print;
2191 struct ring_buffer_event *event;
2192 int len = 0, size, pc;
2193 struct print_entry *entry;
2194 unsigned long flags;
2197 if (tracing_disabled || tracing_selftest_running)
2200 /* Don't pollute graph traces with trace_vprintk internals */
2201 pause_graph_tracing();
2203 pc = preempt_count();
2204 preempt_disable_notrace();
2207 tbuffer = get_trace_buf();
2213 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2215 local_save_flags(flags);
2216 size = sizeof(*entry) + len + 1;
2217 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2221 entry = ring_buffer_event_data(event);
2224 memcpy(&entry->buf, tbuffer, len + 1);
2225 if (!call_filter_check_discard(call, entry, buffer, event)) {
2226 __buffer_unlock_commit(buffer, event);
2227 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2230 preempt_enable_notrace();
2231 unpause_graph_tracing();
2236 int trace_array_vprintk(struct trace_array *tr,
2237 unsigned long ip, const char *fmt, va_list args)
2239 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2242 int trace_array_printk(struct trace_array *tr,
2243 unsigned long ip, const char *fmt, ...)
2248 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2252 ret = trace_array_vprintk(tr, ip, fmt, ap);
2257 int trace_array_printk_buf(struct ring_buffer *buffer,
2258 unsigned long ip, const char *fmt, ...)
2263 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2267 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2272 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2274 return trace_array_vprintk(&global_trace, ip, fmt, args);
2276 EXPORT_SYMBOL_GPL(trace_vprintk);
2278 static void trace_iterator_increment(struct trace_iterator *iter)
2280 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2284 ring_buffer_read(buf_iter, NULL);
2287 static struct trace_entry *
2288 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2289 unsigned long *lost_events)
2291 struct ring_buffer_event *event;
2292 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2295 event = ring_buffer_iter_peek(buf_iter, ts);
2297 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2301 iter->ent_size = ring_buffer_event_length(event);
2302 return ring_buffer_event_data(event);
2308 static struct trace_entry *
2309 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2310 unsigned long *missing_events, u64 *ent_ts)
2312 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2313 struct trace_entry *ent, *next = NULL;
2314 unsigned long lost_events = 0, next_lost = 0;
2315 int cpu_file = iter->cpu_file;
2316 u64 next_ts = 0, ts;
2322 * If we are in a per_cpu trace file, don't bother iterating over
2323 * all the CPUs; peek at that CPU directly.
2325 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2326 if (ring_buffer_empty_cpu(buffer, cpu_file))
2328 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2330 *ent_cpu = cpu_file;
2335 for_each_tracing_cpu(cpu) {
2337 if (ring_buffer_empty_cpu(buffer, cpu))
2340 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2343 * Pick the entry with the smallest timestamp:
2345 if (ent && (!next || ts < next_ts)) {
2349 next_lost = lost_events;
2350 next_size = iter->ent_size;
2354 iter->ent_size = next_size;
2357 *ent_cpu = next_cpu;
2363 *missing_events = next_lost;
2368 /* Find the next real entry, without updating the iterator itself */
2369 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2370 int *ent_cpu, u64 *ent_ts)
2372 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2375 /* Find the next real entry, and increment the iterator to the next entry */
2376 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2378 iter->ent = __find_next_entry(iter, &iter->cpu,
2379 &iter->lost_events, &iter->ts);
2382 trace_iterator_increment(iter);
2384 return iter->ent ? iter : NULL;
2387 static void trace_consume(struct trace_iterator *iter)
2389 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2390 &iter->lost_events);
2393 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2395 struct trace_iterator *iter = m->private;
2399 WARN_ON_ONCE(iter->leftover);
2403 /* can't go backwards */
2408 ent = trace_find_next_entry_inc(iter);
2412 while (ent && iter->idx < i)
2413 ent = trace_find_next_entry_inc(iter);
2420 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2422 struct ring_buffer_event *event;
2423 struct ring_buffer_iter *buf_iter;
2424 unsigned long entries = 0;
2427 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2429 buf_iter = trace_buffer_iter(iter, cpu);
2433 ring_buffer_iter_reset(buf_iter);
2436 * We could have the case with the max latency tracers
2437 * that a reset never took place on a cpu. This is evident
2438 * by the timestamp being before the start of the buffer.
2440 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2441 if (ts >= iter->trace_buffer->time_start)
2444 ring_buffer_read(buf_iter, NULL);
2447 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2451	 * The current tracer is copied to avoid a global lock all around.
2454 static void *s_start(struct seq_file *m, loff_t *pos)
2456 struct trace_iterator *iter = m->private;
2457 struct trace_array *tr = iter->tr;
2458 int cpu_file = iter->cpu_file;
2464	 * copy the tracer to avoid using a global lock all around.
2465	 * iter->trace is a copy of current_trace; the name pointer may be
2466	 * compared instead of using strcmp(), as iter->trace->name
2467	 * will point to the same string as current_trace->name.
2469 mutex_lock(&trace_types_lock);
2470 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2471 *iter->trace = *tr->current_trace;
2472 mutex_unlock(&trace_types_lock);
2474 #ifdef CONFIG_TRACER_MAX_TRACE
2475 if (iter->snapshot && iter->trace->use_max_tr)
2476 return ERR_PTR(-EBUSY);
2479 if (!iter->snapshot)
2480 atomic_inc(&trace_record_cmdline_disabled);
2482 if (*pos != iter->pos) {
2487 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2488 for_each_tracing_cpu(cpu)
2489 tracing_iter_reset(iter, cpu);
2491 tracing_iter_reset(iter, cpu_file);
2494 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2499 * If we overflowed the seq_file before, then we want
2500 * to just reuse the trace_seq buffer again.
2506 p = s_next(m, p, &l);
2510 trace_event_read_lock();
2511 trace_access_lock(cpu_file);
2515 static void s_stop(struct seq_file *m, void *p)
2517 struct trace_iterator *iter = m->private;
2519 #ifdef CONFIG_TRACER_MAX_TRACE
2520 if (iter->snapshot && iter->trace->use_max_tr)
2524 if (!iter->snapshot)
2525 atomic_dec(&trace_record_cmdline_disabled);
2527 trace_access_unlock(iter->cpu_file);
2528 trace_event_read_unlock();
2532 get_total_entries(struct trace_buffer *buf,
2533 unsigned long *total, unsigned long *entries)
2535 unsigned long count;
2541 for_each_tracing_cpu(cpu) {
2542 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2544 * If this buffer has skipped entries, then we hold all
2545 * entries for the trace and we need to ignore the
2546 * ones before the time stamp.
2548 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2549 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2550 /* total is the same as the entries */
2554 ring_buffer_overrun_cpu(buf->buffer, cpu);
2559 static void print_lat_help_header(struct seq_file *m)
2561 seq_puts(m, "# _------=> CPU# \n"
2562 "# / _-----=> irqs-off \n"
2563 "# | / _----=> need-resched \n"
2564 "# || / _---=> hardirq/softirq \n"
2565 "# ||| / _--=> preempt-depth \n"
2567 "# cmd pid ||||| time | caller \n"
2568 "# \\ / ||||| \\ | / \n");
2571 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2573 unsigned long total;
2574 unsigned long entries;
2576 get_total_entries(buf, &total, &entries);
2577 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2578 entries, total, num_online_cpus());
2582 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2584 print_event_info(buf, m);
2585 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2589 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2591 print_event_info(buf, m);
2592 seq_puts(m, "# _-----=> irqs-off\n"
2593 "# / _----=> need-resched\n"
2594 "# | / _---=> hardirq/softirq\n"
2595 "# || / _--=> preempt-depth\n"
2597 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2598 "# | | | |||| | |\n");
2602 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2604 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2605 struct trace_buffer *buf = iter->trace_buffer;
2606 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2607 struct tracer *type = iter->trace;
2608 unsigned long entries;
2609 unsigned long total;
2610 const char *name = "preemption";
2614 get_total_entries(buf, &total, &entries);
2616 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2618 seq_puts(m, "# -----------------------------------"
2619 "---------------------------------\n");
2620 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2621 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2622 nsecs_to_usecs(data->saved_latency),
2626 #if defined(CONFIG_PREEMPT_NONE)
2628 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2630 #elif defined(CONFIG_PREEMPT)
2635 /* These are reserved for later use */
2638 seq_printf(m, " #P:%d)\n", num_online_cpus());
2642 seq_puts(m, "# -----------------\n");
2643 seq_printf(m, "# | task: %.16s-%d "
2644 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2645 data->comm, data->pid,
2646 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2647 data->policy, data->rt_priority);
2648 seq_puts(m, "# -----------------\n");
2650 if (data->critical_start) {
2651 seq_puts(m, "# => started at: ");
2652 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2653 trace_print_seq(m, &iter->seq);
2654 seq_puts(m, "\n# => ended at: ");
2655 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2656 trace_print_seq(m, &iter->seq);
2657 seq_puts(m, "\n#\n");
2663 static void test_cpu_buff_start(struct trace_iterator *iter)
2665 struct trace_seq *s = &iter->seq;
2666 struct trace_array *tr = iter->tr;
2668 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2671 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2674 if (cpumask_test_cpu(iter->cpu, iter->started))
2677 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2680 cpumask_set_cpu(iter->cpu, iter->started);
2682 /* Don't print started cpu buffer for the first entry of the trace */
2684 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2688 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2690 struct trace_array *tr = iter->tr;
2691 struct trace_seq *s = &iter->seq;
2692 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2693 struct trace_entry *entry;
2694 struct trace_event *event;
2698 test_cpu_buff_start(iter);
2700 event = ftrace_find_event(entry->type);
2702 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2703 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2704 trace_print_lat_context(iter);
2706 trace_print_context(iter);
2709 if (trace_seq_has_overflowed(s))
2710 return TRACE_TYPE_PARTIAL_LINE;
2713 return event->funcs->trace(iter, sym_flags, event);
2715 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2717 return trace_handle_return(s);
2720 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2722 struct trace_array *tr = iter->tr;
2723 struct trace_seq *s = &iter->seq;
2724 struct trace_entry *entry;
2725 struct trace_event *event;
2729 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2730 trace_seq_printf(s, "%d %d %llu ",
2731 entry->pid, iter->cpu, iter->ts);
2733 if (trace_seq_has_overflowed(s))
2734 return TRACE_TYPE_PARTIAL_LINE;
2736 event = ftrace_find_event(entry->type);
2738 return event->funcs->raw(iter, 0, event);
2740 trace_seq_printf(s, "%d ?\n", entry->type);
2742 return trace_handle_return(s);
2745 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2747 struct trace_array *tr = iter->tr;
2748 struct trace_seq *s = &iter->seq;
2749 unsigned char newline = '\n';
2750 struct trace_entry *entry;
2751 struct trace_event *event;
2755 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2756 SEQ_PUT_HEX_FIELD(s, entry->pid);
2757 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2758 SEQ_PUT_HEX_FIELD(s, iter->ts);
2759 if (trace_seq_has_overflowed(s))
2760 return TRACE_TYPE_PARTIAL_LINE;
2763 event = ftrace_find_event(entry->type);
2765 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2766 if (ret != TRACE_TYPE_HANDLED)
2770 SEQ_PUT_FIELD(s, newline);
2772 return trace_handle_return(s);
2775 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2777 struct trace_array *tr = iter->tr;
2778 struct trace_seq *s = &iter->seq;
2779 struct trace_entry *entry;
2780 struct trace_event *event;
2784 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2785 SEQ_PUT_FIELD(s, entry->pid);
2786 SEQ_PUT_FIELD(s, iter->cpu);
2787 SEQ_PUT_FIELD(s, iter->ts);
2788 if (trace_seq_has_overflowed(s))
2789 return TRACE_TYPE_PARTIAL_LINE;
2792 event = ftrace_find_event(entry->type);
2793 return event ? event->funcs->binary(iter, 0, event) :
2797 int trace_empty(struct trace_iterator *iter)
2799 struct ring_buffer_iter *buf_iter;
2802 /* If we are looking at one CPU buffer, only check that one */
2803 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2804 cpu = iter->cpu_file;
2805 buf_iter = trace_buffer_iter(iter, cpu);
2807 if (!ring_buffer_iter_empty(buf_iter))
2810 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2816 for_each_tracing_cpu(cpu) {
2817 buf_iter = trace_buffer_iter(iter, cpu);
2819 if (!ring_buffer_iter_empty(buf_iter))
2822 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2830 /* Called with trace_event_read_lock() held. */
2831 enum print_line_t print_trace_line(struct trace_iterator *iter)
2833 struct trace_array *tr = iter->tr;
2834 unsigned long trace_flags = tr->trace_flags;
2835 enum print_line_t ret;
2837 if (iter->lost_events) {
2838 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2839 iter->cpu, iter->lost_events);
2840 if (trace_seq_has_overflowed(&iter->seq))
2841 return TRACE_TYPE_PARTIAL_LINE;
2844 if (iter->trace && iter->trace->print_line) {
2845 ret = iter->trace->print_line(iter);
2846 if (ret != TRACE_TYPE_UNHANDLED)
2850 if (iter->ent->type == TRACE_BPUTS &&
2851 trace_flags & TRACE_ITER_PRINTK &&
2852 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2853 return trace_print_bputs_msg_only(iter);
2855 if (iter->ent->type == TRACE_BPRINT &&
2856 trace_flags & TRACE_ITER_PRINTK &&
2857 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2858 return trace_print_bprintk_msg_only(iter);
2860 if (iter->ent->type == TRACE_PRINT &&
2861 trace_flags & TRACE_ITER_PRINTK &&
2862 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2863 return trace_print_printk_msg_only(iter);
2865 if (trace_flags & TRACE_ITER_BIN)
2866 return print_bin_fmt(iter);
2868 if (trace_flags & TRACE_ITER_HEX)
2869 return print_hex_fmt(iter);
2871 if (trace_flags & TRACE_ITER_RAW)
2872 return print_raw_fmt(iter);
2874 return print_trace_fmt(iter);
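/*
 * Illustrative sketch (not part of the original source): as the comment
 * above notes, print_trace_line() expects trace_event_read_lock() to be
 * held, and the readers in this file also take the per-cpu access lock.
 * A minimal caller therefore looks roughly like:
 *
 *	trace_event_read_lock();
 *	trace_access_lock(iter->cpu_file);
 *	while (trace_find_next_entry_inc(iter))
 *		if (print_trace_line(iter) == TRACE_TYPE_PARTIAL_LINE)
 *			break;
 *	trace_access_unlock(iter->cpu_file);
 *	trace_event_read_unlock();
 */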
2877 void trace_latency_header(struct seq_file *m)
2879 struct trace_iterator *iter = m->private;
2880 struct trace_array *tr = iter->tr;
2882 /* print nothing if the buffers are empty */
2883 if (trace_empty(iter))
2886 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2887 print_trace_header(m, iter);
2889 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2890 print_lat_help_header(m);
2893 void trace_default_header(struct seq_file *m)
2895 struct trace_iterator *iter = m->private;
2896 struct trace_array *tr = iter->tr;
2897 unsigned long trace_flags = tr->trace_flags;
2899 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2902 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2903 /* print nothing if the buffers are empty */
2904 if (trace_empty(iter))
2906 print_trace_header(m, iter);
2907 if (!(trace_flags & TRACE_ITER_VERBOSE))
2908 print_lat_help_header(m);
2910 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2911 if (trace_flags & TRACE_ITER_IRQ_INFO)
2912 print_func_help_header_irq(iter->trace_buffer, m);
2914 print_func_help_header(iter->trace_buffer, m);
2919 static void test_ftrace_alive(struct seq_file *m)
2921 if (!ftrace_is_dead())
2923 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2924 "# MAY BE MISSING FUNCTION EVENTS\n");
2927 #ifdef CONFIG_TRACER_MAX_TRACE
2928 static void show_snapshot_main_help(struct seq_file *m)
2930 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2931 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2932 "# Takes a snapshot of the main buffer.\n"
2933 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2934	"#                      (Doesn't have to be '2'; works with any number that\n"
2935 "# is not a '0' or '1')\n");
2938 static void show_snapshot_percpu_help(struct seq_file *m)
2940 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2941 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2942 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2943 "# Takes a snapshot of the main buffer for this cpu.\n");
2945 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2946 "# Must use main snapshot file to allocate.\n");
2948 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2949	"#                      (Doesn't have to be '2'; works with any number that\n"
2950 "# is not a '0' or '1')\n");
2953 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2955 if (iter->tr->allocated_snapshot)
2956 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2958 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2960 seq_puts(m, "# Snapshot commands:\n");
2961 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2962 show_snapshot_main_help(m);
2964 show_snapshot_percpu_help(m);
2967 /* Should never be called */
2968 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2971 static int s_show(struct seq_file *m, void *v)
2973 struct trace_iterator *iter = v;
2976 if (iter->ent == NULL) {
2978 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2980 test_ftrace_alive(m);
2982 if (iter->snapshot && trace_empty(iter))
2983 print_snapshot_help(m, iter);
2984 else if (iter->trace && iter->trace->print_header)
2985 iter->trace->print_header(m);
2987 trace_default_header(m);
2989 } else if (iter->leftover) {
2991 * If we filled the seq_file buffer earlier, we
2992 * want to just show it now.
2994 ret = trace_print_seq(m, &iter->seq);
2996 /* ret should this time be zero, but you never know */
2997 iter->leftover = ret;
3000 print_trace_line(iter);
3001 ret = trace_print_seq(m, &iter->seq);
3003 * If we overflow the seq_file buffer, then it will
3004 * ask us for this data again at start up.
3006 * ret is 0 if seq_file write succeeded.
3009 iter->leftover = ret;
3016	 * Should be used after trace_array_get(); trace_types_lock
3017	 * ensures that i_cdev has already been initialized.
3019 static inline int tracing_get_cpu(struct inode *inode)
3021 if (inode->i_cdev) /* See trace_create_cpu_file() */
3022 return (long)inode->i_cdev - 1;
3023 return RING_BUFFER_ALL_CPUS;
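/*
 * Illustrative note (not part of the original source): per-cpu files
 * store "cpu + 1" in inode->i_cdev when they are created (see
 * trace_create_cpu_file()), so a NULL i_cdev means "not a per-cpu file"
 * and RING_BUFFER_ALL_CPUS is returned instead. Roughly:
 *
 *	create:	inode->i_cdev = (void *)(long)(cpu + 1);
 *	lookup:	cpu = (long)inode->i_cdev - 1;
 */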
3026 static const struct seq_operations tracer_seq_ops = {
3033 static struct trace_iterator *
3034 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3036 struct trace_array *tr = inode->i_private;
3037 struct trace_iterator *iter;
3040 if (tracing_disabled)
3041 return ERR_PTR(-ENODEV);
3043 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3045 return ERR_PTR(-ENOMEM);
3047 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3049 if (!iter->buffer_iter)
3053 * We make a copy of the current tracer to avoid concurrent
3054 * changes on it while we are reading.
3056 mutex_lock(&trace_types_lock);
3057 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3061 *iter->trace = *tr->current_trace;
3063 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3068 #ifdef CONFIG_TRACER_MAX_TRACE
3069 /* Currently only the top directory has a snapshot */
3070 if (tr->current_trace->print_max || snapshot)
3071 iter->trace_buffer = &tr->max_buffer;
3074 iter->trace_buffer = &tr->trace_buffer;
3075 iter->snapshot = snapshot;
3077 iter->cpu_file = tracing_get_cpu(inode);
3078 mutex_init(&iter->mutex);
3080 /* Notify the tracer early; before we stop tracing. */
3081 if (iter->trace && iter->trace->open)
3082 iter->trace->open(iter);
3084 /* Annotate start of buffers if we had overruns */
3085 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3086 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3088 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3089 if (trace_clocks[tr->clock_id].in_ns)
3090 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3092 /* stop the trace while dumping if we are not opening "snapshot" */
3093 if (!iter->snapshot)
3094 tracing_stop_tr(tr);
3096 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3097 for_each_tracing_cpu(cpu) {
3098 iter->buffer_iter[cpu] =
3099 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3101 ring_buffer_read_prepare_sync();
3102 for_each_tracing_cpu(cpu) {
3103 ring_buffer_read_start(iter->buffer_iter[cpu]);
3104 tracing_iter_reset(iter, cpu);
3107 cpu = iter->cpu_file;
3108 iter->buffer_iter[cpu] =
3109 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3110 ring_buffer_read_prepare_sync();
3111 ring_buffer_read_start(iter->buffer_iter[cpu]);
3112 tracing_iter_reset(iter, cpu);
3115 mutex_unlock(&trace_types_lock);
3120 mutex_unlock(&trace_types_lock);
3122 kfree(iter->buffer_iter);
3124 seq_release_private(inode, file);
3125 return ERR_PTR(-ENOMEM);
3128 int tracing_open_generic(struct inode *inode, struct file *filp)
3130 if (tracing_disabled)
3133 filp->private_data = inode->i_private;
3137 bool tracing_is_disabled(void)
3139	return (tracing_disabled) ? true : false;
3143 * Open and update trace_array ref count.
3144 * Must have the current trace_array passed to it.
3146 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3148 struct trace_array *tr = inode->i_private;
3150 if (tracing_disabled)
3153 if (trace_array_get(tr) < 0)
3156 filp->private_data = inode->i_private;
3161 static int tracing_release(struct inode *inode, struct file *file)
3163 struct trace_array *tr = inode->i_private;
3164 struct seq_file *m = file->private_data;
3165 struct trace_iterator *iter;
3168 if (!(file->f_mode & FMODE_READ)) {
3169 trace_array_put(tr);
3173 /* Writes do not use seq_file */
3175 mutex_lock(&trace_types_lock);
3177 for_each_tracing_cpu(cpu) {
3178 if (iter->buffer_iter[cpu])
3179 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3182 if (iter->trace && iter->trace->close)
3183 iter->trace->close(iter);
3185 if (!iter->snapshot)
3186 /* reenable tracing if it was previously enabled */
3187 tracing_start_tr(tr);
3189 __trace_array_put(tr);
3191 mutex_unlock(&trace_types_lock);
3193 mutex_destroy(&iter->mutex);
3194 free_cpumask_var(iter->started);
3196 kfree(iter->buffer_iter);
3197 seq_release_private(inode, file);
3202 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3204 struct trace_array *tr = inode->i_private;
3206 trace_array_put(tr);
3210 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3212 struct trace_array *tr = inode->i_private;
3214 trace_array_put(tr);
3216 return single_release(inode, file);
3219 static int tracing_open(struct inode *inode, struct file *file)
3221 struct trace_array *tr = inode->i_private;
3222 struct trace_iterator *iter;
3225 if (trace_array_get(tr) < 0)
3228 /* If this file was open for write, then erase contents */
3229 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3230 int cpu = tracing_get_cpu(inode);
3232 if (cpu == RING_BUFFER_ALL_CPUS)
3233 tracing_reset_online_cpus(&tr->trace_buffer);
3235 tracing_reset(&tr->trace_buffer, cpu);
3238 if (file->f_mode & FMODE_READ) {
3239 iter = __tracing_open(inode, file, false);
3241 ret = PTR_ERR(iter);
3242 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3243 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3247 trace_array_put(tr);
3253 * Some tracers are not suitable for instance buffers.
3254 * A tracer is always available for the global array (toplevel)
3255 * or if it explicitly states that it is.
3258 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3260 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3263 /* Find the next tracer that this trace array may use */
3264 static struct tracer *
3265 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3267 while (t && !trace_ok_for_array(t, tr))
3274 t_next(struct seq_file *m, void *v, loff_t *pos)
3276 struct trace_array *tr = m->private;
3277 struct tracer *t = v;
3282 t = get_tracer_for_array(tr, t->next);
3287 static void *t_start(struct seq_file *m, loff_t *pos)
3289 struct trace_array *tr = m->private;
3293 mutex_lock(&trace_types_lock);
3295 t = get_tracer_for_array(tr, trace_types);
3296 for (; t && l < *pos; t = t_next(m, t, &l))
3302 static void t_stop(struct seq_file *m, void *p)
3304 mutex_unlock(&trace_types_lock);
3307 static int t_show(struct seq_file *m, void *v)
3309 struct tracer *t = v;
3314 seq_puts(m, t->name);
3323 static const struct seq_operations show_traces_seq_ops = {
3330 static int show_traces_open(struct inode *inode, struct file *file)
3332 struct trace_array *tr = inode->i_private;
3336 if (tracing_disabled)
3339 ret = seq_open(file, &show_traces_seq_ops);
3343 m = file->private_data;
3350 tracing_write_stub(struct file *filp, const char __user *ubuf,
3351 size_t count, loff_t *ppos)
3356 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3360 if (file->f_mode & FMODE_READ)
3361 ret = seq_lseek(file, offset, whence);
3363 file->f_pos = ret = 0;
3368 static const struct file_operations tracing_fops = {
3369 .open = tracing_open,
3371 .write = tracing_write_stub,
3372 .llseek = tracing_lseek,
3373 .release = tracing_release,
3376 static const struct file_operations show_traces_fops = {
3377 .open = show_traces_open,
3379 .release = seq_release,
3380 .llseek = seq_lseek,
3384 * The tracer itself will not take this lock, but still we want
3385 * to provide a consistent cpumask to user-space:
3387 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3390 * Temporary storage for the character representation of the
3391 * CPU bitmask (and one more byte for the newline):
3393 static char mask_str[NR_CPUS + 1];
3396 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3397 size_t count, loff_t *ppos)
3399 struct trace_array *tr = file_inode(filp)->i_private;
3402 mutex_lock(&tracing_cpumask_update_lock);
3404 len = snprintf(mask_str, count, "%*pb\n",
3405 cpumask_pr_args(tr->tracing_cpumask));
3410 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3413 mutex_unlock(&tracing_cpumask_update_lock);
3419 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3420 size_t count, loff_t *ppos)
3422 struct trace_array *tr = file_inode(filp)->i_private;
3423 cpumask_var_t tracing_cpumask_new;
3426 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3429 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3433 mutex_lock(&tracing_cpumask_update_lock);
3435 local_irq_disable();
3436 arch_spin_lock(&tr->max_lock);
3437 for_each_tracing_cpu(cpu) {
3439 * Increase/decrease the disabled counter if we are
3440 * about to flip a bit in the cpumask:
3442 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3443 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3444 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3445 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3447 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3448 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3449 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3450 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3453 arch_spin_unlock(&tr->max_lock);
3456 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3458 mutex_unlock(&tracing_cpumask_update_lock);
3459 free_cpumask_var(tracing_cpumask_new);
3464 free_cpumask_var(tracing_cpumask_new);
3469 static const struct file_operations tracing_cpumask_fops = {
3470 .open = tracing_open_generic_tr,
3471 .read = tracing_cpumask_read,
3472 .write = tracing_cpumask_write,
3473 .release = tracing_release_generic_tr,
3474 .llseek = generic_file_llseek,
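/*
 * Illustrative note (not part of the original source): from user space
 * the mask is manipulated through the "tracing_cpumask" file backed by
 * the operations above, written and read as a hex bitmask, e.g.
 * (assuming the usual tracefs mount point):
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask	(CPUs 0-1)
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *
 * Writing flips the per-cpu "disabled" counters and the ring buffer
 * record-enable state only for the CPUs whose bits actually changed,
 * as done in tracing_cpumask_write() above.
 */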
3477 static int tracing_trace_options_show(struct seq_file *m, void *v)
3479 struct tracer_opt *trace_opts;
3480 struct trace_array *tr = m->private;
3484 mutex_lock(&trace_types_lock);
3485 tracer_flags = tr->current_trace->flags->val;
3486 trace_opts = tr->current_trace->flags->opts;
3488 for (i = 0; trace_options[i]; i++) {
3489 if (tr->trace_flags & (1 << i))
3490 seq_printf(m, "%s\n", trace_options[i]);
3492 seq_printf(m, "no%s\n", trace_options[i]);
3495 for (i = 0; trace_opts[i].name; i++) {
3496 if (tracer_flags & trace_opts[i].bit)
3497 seq_printf(m, "%s\n", trace_opts[i].name);
3499 seq_printf(m, "no%s\n", trace_opts[i].name);
3501 mutex_unlock(&trace_types_lock);
3506 static int __set_tracer_option(struct trace_array *tr,
3507 struct tracer_flags *tracer_flags,
3508 struct tracer_opt *opts, int neg)
3510 struct tracer *trace = tr->current_trace;
3513 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3518 tracer_flags->val &= ~opts->bit;
3520 tracer_flags->val |= opts->bit;
3524 /* Try to assign a tracer specific option */
3525 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3527 struct tracer *trace = tr->current_trace;
3528 struct tracer_flags *tracer_flags = trace->flags;
3529 struct tracer_opt *opts = NULL;
3532 for (i = 0; tracer_flags->opts[i].name; i++) {
3533 opts = &tracer_flags->opts[i];
3535 if (strcmp(cmp, opts->name) == 0)
3536 return __set_tracer_option(tr, trace->flags, opts, neg);
3542 /* Some tracers require overwrite to stay enabled */
3543 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3545 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3551 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3553 /* do nothing if flag is already set */
3554 if (!!(tr->trace_flags & mask) == !!enabled)
3557 /* Give the tracer a chance to approve the change */
3558 if (tr->current_trace->flag_changed)
3559 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3563 tr->trace_flags |= mask;
3565 tr->trace_flags &= ~mask;
3567 if (mask == TRACE_ITER_RECORD_CMD)
3568 trace_event_enable_cmd_record(enabled);
3570 if (mask == TRACE_ITER_OVERWRITE) {
3571 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3572 #ifdef CONFIG_TRACER_MAX_TRACE
3573 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3577 if (mask == TRACE_ITER_PRINTK) {
3578 trace_printk_start_stop_comm(enabled);
3579 trace_printk_control(enabled);
3585 static int trace_set_options(struct trace_array *tr, char *option)
3592 cmp = strstrip(option);
3594 if (strncmp(cmp, "no", 2) == 0) {
3599 mutex_lock(&trace_types_lock);
3601 for (i = 0; trace_options[i]; i++) {
3602 if (strcmp(cmp, trace_options[i]) == 0) {
3603 ret = set_tracer_flag(tr, 1 << i, !neg);
3608 /* If no option could be set, test the specific tracer options */
3609 if (!trace_options[i])
3610 ret = set_tracer_option(tr, cmp, neg);
3612 mutex_unlock(&trace_types_lock);
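/*
 * Illustrative note (not part of the original source): trace_set_options()
 * takes a single option name, optionally prefixed with "no" to clear it.
 * This is the same syntax user space writes into "trace_options", e.g.
 * with the "overwrite" flag as an example:
 *
 *	# echo overwrite   > trace_options	(set TRACE_ITER_OVERWRITE)
 *	# echo nooverwrite > trace_options	(clear it)
 *
 * Names that do not match a core option fall through to the
 * tracer-specific flags handled by set_tracer_option() above.
 */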
3618 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3619 size_t cnt, loff_t *ppos)
3621 struct seq_file *m = filp->private_data;
3622 struct trace_array *tr = m->private;
3626 if (cnt >= sizeof(buf))
3629 if (copy_from_user(&buf, ubuf, cnt))
3634 ret = trace_set_options(tr, buf);
3643 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3645 struct trace_array *tr = inode->i_private;
3648 if (tracing_disabled)
3651 if (trace_array_get(tr) < 0)
3654 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3656 trace_array_put(tr);
3661 static const struct file_operations tracing_iter_fops = {
3662 .open = tracing_trace_options_open,
3664 .llseek = seq_lseek,
3665 .release = tracing_single_release_tr,
3666 .write = tracing_trace_options_write,
3669 static const char readme_msg[] =
3670 "tracing mini-HOWTO:\n\n"
3671 "# echo 0 > tracing_on : quick way to disable tracing\n"
3672 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3673 " Important files:\n"
3674 " trace\t\t\t- The static contents of the buffer\n"
3675 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3676 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3677 " current_tracer\t- function and latency tracers\n"
3678 " available_tracers\t- list of configured tracers for current_tracer\n"
3679 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3680 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3681	" trace_clock\t\t- change the clock used to order events\n"
3682 " local: Per cpu clock but may not be synced across CPUs\n"
3683 " global: Synced across CPUs but slows tracing down.\n"
3684 " counter: Not a clock, but just an increment\n"
3685 " uptime: Jiffy counter from time of boot\n"
3686 " perf: Same clock that perf events use\n"
3687 #ifdef CONFIG_X86_64
3688 " x86-tsc: TSC cycle counter\n"
3690	"\n trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
3691 " tracing_cpumask\t- Limit which CPUs to trace\n"
3692 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3693 "\t\t\t Remove sub-buffer with rmdir\n"
3694 " trace_options\t\t- Set format or modify how tracing happens\n"
3695	"\t\t\t Disable an option by adding the prefix 'no' to the\n"
3696 "\t\t\t option name\n"
3697 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3698 #ifdef CONFIG_DYNAMIC_FTRACE
3699 "\n available_filter_functions - list of functions that can be filtered on\n"
3700 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3701 "\t\t\t functions\n"
3702 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3703 "\t modules: Can select a group via module\n"
3704 "\t Format: :mod:<module-name>\n"
3705 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3706 "\t triggers: a command to perform when function is hit\n"
3707 "\t Format: <function>:<trigger>[:count]\n"
3708 "\t trigger: traceon, traceoff\n"
3709 "\t\t enable_event:<system>:<event>\n"
3710 "\t\t disable_event:<system>:<event>\n"
3711 #ifdef CONFIG_STACKTRACE
3714 #ifdef CONFIG_TRACER_SNAPSHOT
3719 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3720 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3721 "\t The first one will disable tracing every time do_fault is hit\n"
3722 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3723	"\t The first time do_trap is hit and it disables tracing, the\n"
3724 "\t counter will decrement to 2. If tracing is already disabled,\n"
3725 "\t the counter will not decrement. It only decrements when the\n"
3726 "\t trigger did work\n"
3727	"\t To remove a trigger without a count:\n"
3728	"\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
3729	"\t To remove a trigger with a count:\n"
3730	"\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3731 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3732 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3733 "\t modules: Can select a group via module command :mod:\n"
3734 "\t Does not accept triggers\n"
3735 #endif /* CONFIG_DYNAMIC_FTRACE */
3736 #ifdef CONFIG_FUNCTION_TRACER
3737 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3740 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3741 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3742 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3743 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3745 #ifdef CONFIG_TRACER_SNAPSHOT
3746 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3747 "\t\t\t snapshot buffer. Read the contents for more\n"
3748 "\t\t\t information\n"
3750 #ifdef CONFIG_STACK_TRACER
3751 " stack_trace\t\t- Shows the max stack trace when active\n"
3752 " stack_max_size\t- Shows current max stack size that was traced\n"
3753 "\t\t\t Write into this file to reset the max size (trigger a\n"
3754 "\t\t\t new trace)\n"
3755 #ifdef CONFIG_DYNAMIC_FTRACE
3756 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3759 #endif /* CONFIG_STACK_TRACER */
3760 " events/\t\t- Directory containing all trace event subsystems:\n"
3761 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3762 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3763 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3765 " filter\t\t- If set, only events passing filter are traced\n"
3766 " events/<system>/<event>/\t- Directory containing control files for\n"
3768 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3769 " filter\t\t- If set, only events passing filter are traced\n"
3770 " trigger\t\t- If set, a command to perform when event is hit\n"
3771 "\t Format: <trigger>[:count][if <filter>]\n"
3772 "\t trigger: traceon, traceoff\n"
3773 "\t enable_event:<system>:<event>\n"
3774 "\t disable_event:<system>:<event>\n"
3775 #ifdef CONFIG_STACKTRACE
3778 #ifdef CONFIG_TRACER_SNAPSHOT
3781 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3782 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3783 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3784 "\t events/block/block_unplug/trigger\n"
3785 "\t The first disables tracing every time block_unplug is hit.\n"
3786 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3787 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3788 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3789 "\t Like function triggers, the counter is only decremented if it\n"
3790 "\t enabled or disabled tracing.\n"
3791 "\t To remove a trigger without a count:\n"
3792	"\t echo '!<trigger>' > <system>/<event>/trigger\n"
3793 "\t To remove a trigger with a count:\n"
3794	"\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
3795 "\t Filters can be ignored when removing a trigger.\n"
3799 tracing_readme_read(struct file *filp, char __user *ubuf,
3800 size_t cnt, loff_t *ppos)
3802 return simple_read_from_buffer(ubuf, cnt, ppos,
3803 readme_msg, strlen(readme_msg));
3806 static const struct file_operations tracing_readme_fops = {
3807 .open = tracing_open_generic,
3808 .read = tracing_readme_read,
3809 .llseek = generic_file_llseek,
3812 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3814 unsigned int *ptr = v;
3816 if (*pos || m->count)
3821 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3823 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3832 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3838 arch_spin_lock(&trace_cmdline_lock);
3840 v = &savedcmd->map_cmdline_to_pid[0];
3842 v = saved_cmdlines_next(m, v, &l);
3850 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3852 arch_spin_unlock(&trace_cmdline_lock);
3856 static int saved_cmdlines_show(struct seq_file *m, void *v)
3858 char buf[TASK_COMM_LEN];
3859 unsigned int *pid = v;
3861 __trace_find_cmdline(*pid, buf);
3862 seq_printf(m, "%d %s\n", *pid, buf);
3866 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3867 .start = saved_cmdlines_start,
3868 .next = saved_cmdlines_next,
3869 .stop = saved_cmdlines_stop,
3870 .show = saved_cmdlines_show,
3873 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3875 if (tracing_disabled)
3878 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3881 static const struct file_operations tracing_saved_cmdlines_fops = {
3882 .open = tracing_saved_cmdlines_open,
3884 .llseek = seq_lseek,
3885 .release = seq_release,
3889 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3890 size_t cnt, loff_t *ppos)
3895 arch_spin_lock(&trace_cmdline_lock);
3896 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3897 arch_spin_unlock(&trace_cmdline_lock);
3899 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3902 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3904 kfree(s->saved_cmdlines);
3905 kfree(s->map_cmdline_to_pid);
3909 static int tracing_resize_saved_cmdlines(unsigned int val)
3911 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3913 s = kmalloc(sizeof(*s), GFP_KERNEL);
3917 if (allocate_cmdlines_buffer(val, s) < 0) {
3922 arch_spin_lock(&trace_cmdline_lock);
3923 savedcmd_temp = savedcmd;
3925 arch_spin_unlock(&trace_cmdline_lock);
3926 free_saved_cmdlines_buffer(savedcmd_temp);
3932 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3933 size_t cnt, loff_t *ppos)
3938 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3942	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
3943 if (!val || val > PID_MAX_DEFAULT)
3946 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3955 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3956 .open = tracing_open_generic,
3957 .read = tracing_saved_cmdlines_size_read,
3958 .write = tracing_saved_cmdlines_size_write,
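/*
 * Illustrative note (not part of the original source): the number of
 * saved comm<->pid mappings can be resized from user space through the
 * "saved_cmdlines_size" file backed by the operations above, within the
 * 1..PID_MAX_DEFAULT range enforced by the write handler:
 *
 *	# cat saved_cmdlines_size
 *	# echo 1024 > saved_cmdlines_size
 */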
3961 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
3962 static union trace_enum_map_item *
3963 update_enum_map(union trace_enum_map_item *ptr)
3965 if (!ptr->map.enum_string) {
3966 if (ptr->tail.next) {
3967 ptr = ptr->tail.next;
3968 /* Set ptr to the next real item (skip head) */
3976 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3978 union trace_enum_map_item *ptr = v;
3981 * Paranoid! If ptr points to end, we don't want to increment past it.
3982 * This really should never happen.
3984 ptr = update_enum_map(ptr);
3985 if (WARN_ON_ONCE(!ptr))
3992 ptr = update_enum_map(ptr);
3997 static void *enum_map_start(struct seq_file *m, loff_t *pos)
3999 union trace_enum_map_item *v;
4002 mutex_lock(&trace_enum_mutex);
4004 v = trace_enum_maps;
4008 while (v && l < *pos) {
4009 v = enum_map_next(m, v, &l);
4015 static void enum_map_stop(struct seq_file *m, void *v)
4017 mutex_unlock(&trace_enum_mutex);
4020 static int enum_map_show(struct seq_file *m, void *v)
4022 union trace_enum_map_item *ptr = v;
4024 seq_printf(m, "%s %ld (%s)\n",
4025 ptr->map.enum_string, ptr->map.enum_value,
4031 static const struct seq_operations tracing_enum_map_seq_ops = {
4032 .start = enum_map_start,
4033 .next = enum_map_next,
4034 .stop = enum_map_stop,
4035 .show = enum_map_show,
4038 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4040 if (tracing_disabled)
4043 return seq_open(filp, &tracing_enum_map_seq_ops);
4046 static const struct file_operations tracing_enum_map_fops = {
4047 .open = tracing_enum_map_open,
4049 .llseek = seq_lseek,
4050 .release = seq_release,
4053 static inline union trace_enum_map_item *
4054 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4056 /* Return tail of array given the head */
4057 return ptr + ptr->head.length + 1;
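/*
 * Illustrative note (not part of the original source): each block of
 * saved enum maps is one contiguous array of union trace_enum_map_item:
 *
 *	[ head (mod, length) ][ map 0 ] ... [ map N-1 ][ tail (next) ]
 *
 * so skipping "length" map entries past the head lands on the tail,
 * whose ->tail.next chains to the next module's block (or NULL).
 */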
4061 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4064 struct trace_enum_map **stop;
4065 struct trace_enum_map **map;
4066 union trace_enum_map_item *map_array;
4067 union trace_enum_map_item *ptr;
4072 * The trace_enum_maps contains the map plus a head and tail item,
4073 * where the head holds the module and length of array, and the
4074 * tail holds a pointer to the next list.
4076 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4078 pr_warning("Unable to allocate trace enum mapping\n");
4082 mutex_lock(&trace_enum_mutex);
4084 if (!trace_enum_maps)
4085 trace_enum_maps = map_array;
4087 ptr = trace_enum_maps;
4089 ptr = trace_enum_jmp_to_tail(ptr);
4090 if (!ptr->tail.next)
4092 ptr = ptr->tail.next;
4095 ptr->tail.next = map_array;
4097 map_array->head.mod = mod;
4098 map_array->head.length = len;
4101 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4102 map_array->map = **map;
4105 memset(map_array, 0, sizeof(*map_array));
4107 mutex_unlock(&trace_enum_mutex);
4110 static void trace_create_enum_file(struct dentry *d_tracer)
4112 trace_create_file("enum_map", 0444, d_tracer,
4113 NULL, &tracing_enum_map_fops);
4116 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4117 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4118 static inline void trace_insert_enum_map_file(struct module *mod,
4119 struct trace_enum_map **start, int len) { }
4120 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4122 static void trace_insert_enum_map(struct module *mod,
4123 struct trace_enum_map **start, int len)
4125 struct trace_enum_map **map;
4132 trace_event_enum_update(map, len);
4134 trace_insert_enum_map_file(mod, start, len);
4138 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4139 size_t cnt, loff_t *ppos)
4141 struct trace_array *tr = filp->private_data;
4142 char buf[MAX_TRACER_SIZE+2];
4145 mutex_lock(&trace_types_lock);
4146 r = sprintf(buf, "%s\n", tr->current_trace->name);
4147 mutex_unlock(&trace_types_lock);
4149 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4152 int tracer_init(struct tracer *t, struct trace_array *tr)
4154 tracing_reset_online_cpus(&tr->trace_buffer);
4158 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4162 for_each_tracing_cpu(cpu)
4163 per_cpu_ptr(buf->data, cpu)->entries = val;
4166 #ifdef CONFIG_TRACER_MAX_TRACE
4167	/* resize @trace_buf's buffer to the size of @size_buf's entries */
4168 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4169 struct trace_buffer *size_buf, int cpu_id)
4173 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4174 for_each_tracing_cpu(cpu) {
4175 ret = ring_buffer_resize(trace_buf->buffer,
4176 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4179 per_cpu_ptr(trace_buf->data, cpu)->entries =
4180 per_cpu_ptr(size_buf->data, cpu)->entries;
4183 ret = ring_buffer_resize(trace_buf->buffer,
4184 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4186 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4187 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4192 #endif /* CONFIG_TRACER_MAX_TRACE */
4194 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4195 unsigned long size, int cpu)
4200 * If kernel or user changes the size of the ring buffer
4201 * we use the size that was given, and we can forget about
4202 * expanding it later.
4204 ring_buffer_expanded = true;
4206 /* May be called before buffers are initialized */
4207 if (!tr->trace_buffer.buffer)
4210 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4214 #ifdef CONFIG_TRACER_MAX_TRACE
4215 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4216 !tr->current_trace->use_max_tr)
4219 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4221 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4222 &tr->trace_buffer, cpu);
4225 * AARGH! We are left with different
4226 * size max buffer!!!!
4227 * The max buffer is our "snapshot" buffer.
4228 * When a tracer needs a snapshot (one of the
4229 * latency tracers), it swaps the max buffer
4230	 * with the saved snapshot. We succeeded in updating
4231	 * the size of the main buffer, but failed to
4232 * update the size of the max buffer. But when we tried
4233 * to reset the main buffer to the original size, we
4234 * failed there too. This is very unlikely to
4235 * happen, but if it does, warn and kill all
4239 tracing_disabled = 1;
4244 if (cpu == RING_BUFFER_ALL_CPUS)
4245 set_buffer_entries(&tr->max_buffer, size);
4247 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4250 #endif /* CONFIG_TRACER_MAX_TRACE */
4252 if (cpu == RING_BUFFER_ALL_CPUS)
4253 set_buffer_entries(&tr->trace_buffer, size);
4255 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4260 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4261 unsigned long size, int cpu_id)
4265 mutex_lock(&trace_types_lock);
4267 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4268	/* make sure this cpu is enabled in the mask */
4269 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4275 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4280 mutex_unlock(&trace_types_lock);
4287 * tracing_update_buffers - used by tracing facility to expand ring buffers
4289	 * To save memory when tracing is never used on a system that has it
4290	 * configured in, the ring buffers are set to a minimum size. But once
4291	 * a user starts to use the tracing facility, then they need to grow
4292	 * to their default size.
4294 * This function is to be called when a tracer is about to be used.
4296 int tracing_update_buffers(void)
4300 mutex_lock(&trace_types_lock);
4301 if (!ring_buffer_expanded)
4302 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4303 RING_BUFFER_ALL_CPUS);
4304 mutex_unlock(&trace_types_lock);
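/*
 * Illustrative sketch (not part of the original source): a typical
 * caller expands the buffers before enabling anything that will write
 * to them, e.g. just before a tracer or an event is switched on:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	... enable the tracer or event ...
 */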
4309 struct trace_option_dentry;
4312 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4315 * Used to clear out the tracer before deletion of an instance.
4316 * Must have trace_types_lock held.
4318 static void tracing_set_nop(struct trace_array *tr)
4320 if (tr->current_trace == &nop_trace)
4323 tr->current_trace->enabled--;
4325 if (tr->current_trace->reset)
4326 tr->current_trace->reset(tr);
4328 tr->current_trace = &nop_trace;
4331 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4333 /* Only enable if the directory has been created already. */
4337 create_trace_option_files(tr, t);
4340 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4343 #ifdef CONFIG_TRACER_MAX_TRACE
4348 mutex_lock(&trace_types_lock);
4350 if (!ring_buffer_expanded) {
4351 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4352 RING_BUFFER_ALL_CPUS);
4358 for (t = trace_types; t; t = t->next) {
4359 if (strcmp(t->name, buf) == 0)
4366 if (t == tr->current_trace)
4369 /* Some tracers are only allowed for the top level buffer */
4370 if (!trace_ok_for_array(t, tr)) {
4375 /* If trace pipe files are being read, we can't change the tracer */
4376 if (tr->current_trace->ref) {
4381 trace_branch_disable();
4383 tr->current_trace->enabled--;
4385 if (tr->current_trace->reset)
4386 tr->current_trace->reset(tr);
4388 /* Current trace needs to be nop_trace before synchronize_sched */
4389 tr->current_trace = &nop_trace;
4391 #ifdef CONFIG_TRACER_MAX_TRACE
4392 had_max_tr = tr->allocated_snapshot;
4394 if (had_max_tr && !t->use_max_tr) {
4396 * We need to make sure that the update_max_tr sees that
4397 * current_trace changed to nop_trace to keep it from
4398 * swapping the buffers after we resize it.
4399	 * The update_max_tr is called with interrupts disabled,
4400	 * so a synchronize_sched() is sufficient.
4402 synchronize_sched();
4407 #ifdef CONFIG_TRACER_MAX_TRACE
4408 if (t->use_max_tr && !had_max_tr) {
4409 ret = alloc_snapshot(tr);
4416 ret = tracer_init(t, tr);
4421 tr->current_trace = t;
4422 tr->current_trace->enabled++;
4423 trace_branch_enable(tr);
4425 mutex_unlock(&trace_types_lock);
4431 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4432 size_t cnt, loff_t *ppos)
4434 struct trace_array *tr = filp->private_data;
4435 char buf[MAX_TRACER_SIZE+1];
4442 if (cnt > MAX_TRACER_SIZE)
4443 cnt = MAX_TRACER_SIZE;
4445 if (copy_from_user(&buf, ubuf, cnt))
4450	/* strip trailing whitespace. */
4451 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4454 err = tracing_set_tracer(tr, buf);
4464 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4465 size_t cnt, loff_t *ppos)
4470 r = snprintf(buf, sizeof(buf), "%ld\n",
4471 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4472 if (r > sizeof(buf))
4474 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4478 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4479 size_t cnt, loff_t *ppos)
4484 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4494 tracing_thresh_read(struct file *filp, char __user *ubuf,
4495 size_t cnt, loff_t *ppos)
4497 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4501 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4502 size_t cnt, loff_t *ppos)
4504 struct trace_array *tr = filp->private_data;
4507 mutex_lock(&trace_types_lock);
4508 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4512 if (tr->current_trace->update_thresh) {
4513 ret = tr->current_trace->update_thresh(tr);
4520 mutex_unlock(&trace_types_lock);
4526 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4527 size_t cnt, loff_t *ppos)
4529 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4533 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4534 size_t cnt, loff_t *ppos)
4536 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4539 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4541 struct trace_array *tr = inode->i_private;
4542 struct trace_iterator *iter;
4545 if (tracing_disabled)
4548 if (trace_array_get(tr) < 0)
4551 mutex_lock(&trace_types_lock);
4553 /* create a buffer to store the information to pass to userspace */
4554 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4557 __trace_array_put(tr);
4561 trace_seq_init(&iter->seq);
4562 iter->trace = tr->current_trace;
4564 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4569 /* trace pipe does not show start of buffer */
4570 cpumask_setall(iter->started);
4572 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4573 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4575 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4576 if (trace_clocks[tr->clock_id].in_ns)
4577 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4580 iter->trace_buffer = &tr->trace_buffer;
4581 iter->cpu_file = tracing_get_cpu(inode);
4582 mutex_init(&iter->mutex);
4583 filp->private_data = iter;
4585 if (iter->trace->pipe_open)
4586 iter->trace->pipe_open(iter);
4588 nonseekable_open(inode, filp);
4590 tr->current_trace->ref++;
4592 mutex_unlock(&trace_types_lock);
4598 __trace_array_put(tr);
4599 mutex_unlock(&trace_types_lock);
4603 static int tracing_release_pipe(struct inode *inode, struct file *file)
4605 struct trace_iterator *iter = file->private_data;
4606 struct trace_array *tr = inode->i_private;
4608 mutex_lock(&trace_types_lock);
4610 tr->current_trace->ref--;
4612 if (iter->trace->pipe_close)
4613 iter->trace->pipe_close(iter);
4615 mutex_unlock(&trace_types_lock);
4617 free_cpumask_var(iter->started);
4618 mutex_destroy(&iter->mutex);
4621 trace_array_put(tr);
4627 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4629 struct trace_array *tr = iter->tr;
4631 /* Iterators are static, they should be filled or empty */
4632 if (trace_buffer_iter(iter, iter->cpu_file))
4633 return POLLIN | POLLRDNORM;
4635 if (tr->trace_flags & TRACE_ITER_BLOCK)
4637 * Always select as readable when in blocking mode
4639 return POLLIN | POLLRDNORM;
4641 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4646 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4648 struct trace_iterator *iter = filp->private_data;
4650 return trace_poll(iter, filp, poll_table);
4653 /* Must be called with iter->mutex held. */
4654 static int tracing_wait_pipe(struct file *filp)
4656 struct trace_iterator *iter = filp->private_data;
4659 while (trace_empty(iter)) {
4661 if ((filp->f_flags & O_NONBLOCK)) {
4666	 * We only return EOF once we have read something and tracing
4667	 * has been disabled. If tracing is disabled but we have never
4668	 * read anything, we keep blocking. This allows a user to cat this
4669	 * file, and then enable tracing. But after we have read something,
4670	 * we give an EOF when tracing is again disabled.
4672 * iter->pos will be 0 if we haven't read anything.
4674 if (!tracing_is_on() && iter->pos)
4677 mutex_unlock(&iter->mutex);
4679 ret = wait_on_pipe(iter, false);
4681 mutex_lock(&iter->mutex);
4694 tracing_read_pipe(struct file *filp, char __user *ubuf,
4695 size_t cnt, loff_t *ppos)
4697 struct trace_iterator *iter = filp->private_data;
4700 /* return any leftover data */
4701 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4705 trace_seq_init(&iter->seq);
4708 * Avoid more than one consumer on a single file descriptor
4709	 * This is just a matter of trace coherency; the ring buffer itself
4712 mutex_lock(&iter->mutex);
4713 if (iter->trace->read) {
4714 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4720 sret = tracing_wait_pipe(filp);
4724 /* stop when tracing is finished */
4725 if (trace_empty(iter)) {
4730 if (cnt >= PAGE_SIZE)
4731 cnt = PAGE_SIZE - 1;
4733 /* reset all but tr, trace, and overruns */
4734 memset(&iter->seq, 0,
4735 sizeof(struct trace_iterator) -
4736 offsetof(struct trace_iterator, seq));
4737 cpumask_clear(iter->started);
4740 trace_event_read_lock();
4741 trace_access_lock(iter->cpu_file);
4742 while (trace_find_next_entry_inc(iter) != NULL) {
4743 enum print_line_t ret;
4744 int save_len = iter->seq.seq.len;
4746 ret = print_trace_line(iter);
4747 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4748 /* don't print partial lines */
4749 iter->seq.seq.len = save_len;
4752 if (ret != TRACE_TYPE_NO_CONSUME)
4753 trace_consume(iter);
4755 if (trace_seq_used(&iter->seq) >= cnt)
4759 * Setting the full flag means we reached the trace_seq buffer
4760	 * size and we should have left via the partial-output condition above.
4761 * One of the trace_seq_* functions is not used properly.
4763 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4766 trace_access_unlock(iter->cpu_file);
4767 trace_event_read_unlock();
4769 /* Now copy what we have to the user */
4770 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4771 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4772 trace_seq_init(&iter->seq);
4775	 * If there was nothing to send to the user, despite having consumed
4776	 * trace entries, go back and wait for more entries.
4782 mutex_unlock(&iter->mutex);
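/*
 * Illustrative note (not part of the original source): the behaviour
 * user space sees when reading trace_pipe, which follows from
 * tracing_wait_pipe() and the loop above:
 *
 *	fd = open("trace_pipe", O_RDONLY);		read() blocks until data arrives
 *	fd = open("trace_pipe", O_RDONLY | O_NONBLOCK);	read() returns -EAGAIN when empty
 *
 * Reads are consuming (what is reported is discarded via trace_consume()),
 * and once something has been read, a read while tracing is disabled
 * returns 0 (EOF) instead of blocking.
 */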
4787 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4790 __free_page(spd->pages[idx]);
4793 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4795 .confirm = generic_pipe_buf_confirm,
4796 .release = generic_pipe_buf_release,
4797 .steal = generic_pipe_buf_steal,
4798 .get = generic_pipe_buf_get,
4802 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4808 /* Seq buffer is page-sized, exactly what we need. */
4810 save_len = iter->seq.seq.len;
4811 ret = print_trace_line(iter);
4813 if (trace_seq_has_overflowed(&iter->seq)) {
4814 iter->seq.seq.len = save_len;
4819 * This should not be hit, because it should only
4820 * be set if the iter->seq overflowed. But check it
4821 * anyway to be safe.
4823 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4824 iter->seq.seq.len = save_len;
4828 count = trace_seq_used(&iter->seq) - save_len;
4831 iter->seq.seq.len = save_len;
4835 if (ret != TRACE_TYPE_NO_CONSUME)
4836 trace_consume(iter);
4838 if (!trace_find_next_entry_inc(iter)) {
4848 static ssize_t tracing_splice_read_pipe(struct file *filp,
4850 struct pipe_inode_info *pipe,
4854 struct page *pages_def[PIPE_DEF_BUFFERS];
4855 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4856 struct trace_iterator *iter = filp->private_data;
4857 struct splice_pipe_desc spd = {
4859 .partial = partial_def,
4860 .nr_pages = 0, /* This gets updated below. */
4861 .nr_pages_max = PIPE_DEF_BUFFERS,
4863 .ops = &tracing_pipe_buf_ops,
4864 .spd_release = tracing_spd_release_pipe,
4870 if (splice_grow_spd(pipe, &spd))
4873 mutex_lock(&iter->mutex);
4875 if (iter->trace->splice_read) {
4876 ret = iter->trace->splice_read(iter, filp,
4877 ppos, pipe, len, flags);
4882 ret = tracing_wait_pipe(filp);
4886 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4891 trace_event_read_lock();
4892 trace_access_lock(iter->cpu_file);
4894 /* Fill as many pages as possible. */
4895 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4896 spd.pages[i] = alloc_page(GFP_KERNEL);
4900 rem = tracing_fill_pipe_page(rem, iter);
4902 /* Copy the data into the page, so we can start over. */
4903 ret = trace_seq_to_buffer(&iter->seq,
4904 page_address(spd.pages[i]),
4905 trace_seq_used(&iter->seq));
4907 __free_page(spd.pages[i]);
4910 spd.partial[i].offset = 0;
4911 spd.partial[i].len = trace_seq_used(&iter->seq);
4913 trace_seq_init(&iter->seq);
4916 trace_access_unlock(iter->cpu_file);
4917 trace_event_read_unlock();
4918 mutex_unlock(&iter->mutex);
4922 ret = splice_to_pipe(pipe, &spd);
4924 splice_shrink_spd(&spd);
4928 mutex_unlock(&iter->mutex);
4933 tracing_entries_read(struct file *filp, char __user *ubuf,
4934 size_t cnt, loff_t *ppos)
4936 struct inode *inode = file_inode(filp);
4937 struct trace_array *tr = inode->i_private;
4938 int cpu = tracing_get_cpu(inode);
4943 mutex_lock(&trace_types_lock);
4945 if (cpu == RING_BUFFER_ALL_CPUS) {
4946 int cpu, buf_size_same;
4951 /* check if all cpu sizes are same */
4952 for_each_tracing_cpu(cpu) {
4953 /* fill in the size from first enabled cpu */
4955 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4956 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4962 if (buf_size_same) {
4963 if (!ring_buffer_expanded)
4964 r = sprintf(buf, "%lu (expanded: %lu)\n",
4966 trace_buf_size >> 10);
4968 r = sprintf(buf, "%lu\n", size >> 10);
4970 r = sprintf(buf, "X\n");
4972 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4974 mutex_unlock(&trace_types_lock);
4976 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4981 tracing_entries_write(struct file *filp, const char __user *ubuf,
4982 size_t cnt, loff_t *ppos)
4984 struct inode *inode = file_inode(filp);
4985 struct trace_array *tr = inode->i_private;
4989 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4993 /* must have at least 1 entry */
4997 /* value is in KB */
4999 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
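/*
 * Example (values are interpreted in KB, as noted above), assuming the
 * usual tracefs mount point:
 *
 *   # echo 4096 > buffer_size_kb                  # resize all CPUs
 *   # echo 1024 > per_cpu/cpu0/buffer_size_kb     # resize only CPU 0
 */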
5009 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5010 size_t cnt, loff_t *ppos)
5012 struct trace_array *tr = filp->private_data;
5015 unsigned long size = 0, expanded_size = 0;
5017 mutex_lock(&trace_types_lock);
5018 for_each_tracing_cpu(cpu) {
5019 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5020 if (!ring_buffer_expanded)
5021 expanded_size += trace_buf_size >> 10;
5023 if (ring_buffer_expanded)
5024 r = sprintf(buf, "%lu\n", size);
5026 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5027 mutex_unlock(&trace_types_lock);
5029 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5033 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5034 size_t cnt, loff_t *ppos)
5037 * There is no need to read what the user has written, this function
5038 * is just to make sure that there is no error when "echo" is used
5047 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5049 struct trace_array *tr = inode->i_private;
5051 /* disable tracing ? */
5052 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5053 tracer_tracing_off(tr);
5054 /* resize the ring buffer to 0 */
5055 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5057 trace_array_put(tr);
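/*
 * Example: writing anything to "free_buffer" and closing the file shrinks
 * the ring buffer to zero; if the TRACE_ITER_STOP_ON_FREE flag (usually
 * exposed as the "disable_on_free" option) is set, tracing is switched off
 * first:
 *
 *   # echo > free_buffer
 */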
5063 tracing_mark_write(struct file *filp, const char __user *ubuf,
5064 size_t cnt, loff_t *fpos)
5066 unsigned long addr = (unsigned long)ubuf;
5067 struct trace_array *tr = filp->private_data;
5068 struct ring_buffer_event *event;
5069 struct ring_buffer *buffer;
5070 struct print_entry *entry;
5071 unsigned long irq_flags;
5072 struct page *pages[2];
5082 if (tracing_disabled)
5085 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5088 if (cnt > TRACE_BUF_SIZE)
5089 cnt = TRACE_BUF_SIZE;
5092 * Userspace is injecting traces into the kernel trace buffer.
5093 * We want to be as non-intrusive as possible.
5094 * To do so, we do not want to allocate any special buffers
5095 * or take any locks, but instead write the userspace data
5096 * straight into the ring buffer.
5098 * First we need to pin the userspace buffer into memory,
5099 * which it most likely already is, because the caller just referenced it.
5100 * But there's no guarantee that it is. By using get_user_pages_fast()
5101 * and kmap_atomic/kunmap_atomic() we can get access to the
5102 * pages directly. We then write the data directly into the
5105 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5107 /* check if we cross pages */
5108 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5111 offset = addr & (PAGE_SIZE - 1);
5114 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5115 if (ret < nr_pages) {
5117 put_page(pages[ret]);
5122 for (i = 0; i < nr_pages; i++)
5123 map_page[i] = kmap_atomic(pages[i]);
5125 local_save_flags(irq_flags);
5126 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5127 buffer = tr->trace_buffer.buffer;
5128 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5129 irq_flags, preempt_count());
5131 /* Ring buffer disabled, return as if not open for write */
5136 entry = ring_buffer_event_data(event);
5137 entry->ip = _THIS_IP_;
5139 if (nr_pages == 2) {
5140 len = PAGE_SIZE - offset;
5141 memcpy(&entry->buf, map_page[0] + offset, len);
5142 memcpy(&entry->buf[len], map_page[1], cnt - len);
5144 memcpy(&entry->buf, map_page[0] + offset, cnt);
5146 if (entry->buf[cnt - 1] != '\n') {
5147 entry->buf[cnt] = '\n';
5148 entry->buf[cnt + 1] = '\0';
5150 entry->buf[cnt] = '\0';
5152 __buffer_unlock_commit(buffer, event);
5159 for (i = nr_pages - 1; i >= 0; i--) {
5160 kunmap_atomic(map_page[i]);
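/*
 * Example: user space can annotate a trace by writing to "trace_marker";
 * the string shows up as a print event in the ring buffer:
 *
 *   # echo "hello from user space" > trace_marker
 *
 * Writes are capped at TRACE_BUF_SIZE (less than a page), so the user data
 * spans at most two pages, which is why two pages are mapped above.
 */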
5167 static int tracing_clock_show(struct seq_file *m, void *v)
5169 struct trace_array *tr = m->private;
5172 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5174 "%s%s%s%s", i ? " " : "",
5175 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5176 i == tr->clock_id ? "]" : "");
5182 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5186 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5187 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5190 if (i == ARRAY_SIZE(trace_clocks))
5193 mutex_lock(&trace_types_lock);
5197 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5200 * New clock may not be consistent with the previous clock.
5201 * Reset the buffer so that it doesn't have incomparable timestamps.
5203 tracing_reset_online_cpus(&tr->trace_buffer);
5205 #ifdef CONFIG_TRACER_MAX_TRACE
5206 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5207 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5208 tracing_reset_online_cpus(&tr->max_buffer);
5211 mutex_unlock(&trace_types_lock);
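/*
 * Example: the current clock is shown in brackets when reading
 * "trace_clock", and can be changed by writing one of the listed names
 * (the exact set depends on the kernel configuration):
 *
 *   # cat trace_clock
 *   [local] global counter uptime perf ...
 *   # echo global > trace_clock
 *
 * As noted above, switching clocks resets the ring buffers so that
 * timestamps stay comparable.
 */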
5216 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5217 size_t cnt, loff_t *fpos)
5219 struct seq_file *m = filp->private_data;
5220 struct trace_array *tr = m->private;
5222 const char *clockstr;
5225 if (cnt >= sizeof(buf))
5228 if (copy_from_user(&buf, ubuf, cnt))
5233 clockstr = strstrip(buf);
5235 ret = tracing_set_clock(tr, clockstr);
5244 static int tracing_clock_open(struct inode *inode, struct file *file)
5246 struct trace_array *tr = inode->i_private;
5249 if (tracing_disabled)
5252 if (trace_array_get(tr))
5255 ret = single_open(file, tracing_clock_show, inode->i_private);
5257 trace_array_put(tr);
5262 struct ftrace_buffer_info {
5263 struct trace_iterator iter;
5268 #ifdef CONFIG_TRACER_SNAPSHOT
5269 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5271 struct trace_array *tr = inode->i_private;
5272 struct trace_iterator *iter;
5276 if (trace_array_get(tr) < 0)
5279 if (file->f_mode & FMODE_READ) {
5280 iter = __tracing_open(inode, file, true);
5282 ret = PTR_ERR(iter);
5284 /* Writes still need the seq_file to hold the private data */
5286 m = kzalloc(sizeof(*m), GFP_KERNEL);
5289 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5297 iter->trace_buffer = &tr->max_buffer;
5298 iter->cpu_file = tracing_get_cpu(inode);
5300 file->private_data = m;
5304 trace_array_put(tr);
5310 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5313 struct seq_file *m = filp->private_data;
5314 struct trace_iterator *iter = m->private;
5315 struct trace_array *tr = iter->tr;
5319 ret = tracing_update_buffers();
5323 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5327 mutex_lock(&trace_types_lock);
5329 if (tr->current_trace->use_max_tr) {
5336 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5340 if (tr->allocated_snapshot)
5344 /* Only allow per-cpu swap if the ring buffer supports it */
5345 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5346 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5351 if (!tr->allocated_snapshot) {
5352 ret = alloc_snapshot(tr);
5356 local_irq_disable();
5357 /* Now, we're going to swap */
5358 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5359 update_max_tr(tr, current, smp_processor_id());
5361 update_max_tr_single(tr, current, iter->cpu_file);
5365 if (tr->allocated_snapshot) {
5366 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5367 tracing_reset_online_cpus(&tr->max_buffer);
5369 tracing_reset(&tr->max_buffer, iter->cpu_file);
5379 mutex_unlock(&trace_types_lock);
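/*
 * Rough usage of the "snapshot" file (see Documentation/trace/ftrace.txt
 * for the authoritative description):
 *
 *   # echo 1 > snapshot    # allocate the max buffer if needed and swap
 *   # cat snapshot         # read the snapshotted trace
 *   # echo 0 > snapshot    # free the snapshot buffer
 *   # echo 2 > snapshot    # clear the snapshot without freeing it
 */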
5383 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5385 struct seq_file *m = file->private_data;
5388 ret = tracing_release(inode, file);
5390 if (file->f_mode & FMODE_READ)
5393 /* If write only, the seq_file is just a stub */
5401 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5402 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5403 size_t count, loff_t *ppos);
5404 static int tracing_buffers_release(struct inode *inode, struct file *file);
5405 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5406 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5408 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5410 struct ftrace_buffer_info *info;
5413 ret = tracing_buffers_open(inode, filp);
5417 info = filp->private_data;
5419 if (info->iter.trace->use_max_tr) {
5420 tracing_buffers_release(inode, filp);
5424 info->iter.snapshot = true;
5425 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5430 #endif /* CONFIG_TRACER_SNAPSHOT */
5433 static const struct file_operations tracing_thresh_fops = {
5434 .open = tracing_open_generic,
5435 .read = tracing_thresh_read,
5436 .write = tracing_thresh_write,
5437 .llseek = generic_file_llseek,
5440 static const struct file_operations tracing_max_lat_fops = {
5441 .open = tracing_open_generic,
5442 .read = tracing_max_lat_read,
5443 .write = tracing_max_lat_write,
5444 .llseek = generic_file_llseek,
5447 static const struct file_operations set_tracer_fops = {
5448 .open = tracing_open_generic,
5449 .read = tracing_set_trace_read,
5450 .write = tracing_set_trace_write,
5451 .llseek = generic_file_llseek,
5454 static const struct file_operations tracing_pipe_fops = {
5455 .open = tracing_open_pipe,
5456 .poll = tracing_poll_pipe,
5457 .read = tracing_read_pipe,
5458 .splice_read = tracing_splice_read_pipe,
5459 .release = tracing_release_pipe,
5460 .llseek = no_llseek,
5463 static const struct file_operations tracing_entries_fops = {
5464 .open = tracing_open_generic_tr,
5465 .read = tracing_entries_read,
5466 .write = tracing_entries_write,
5467 .llseek = generic_file_llseek,
5468 .release = tracing_release_generic_tr,
5471 static const struct file_operations tracing_total_entries_fops = {
5472 .open = tracing_open_generic_tr,
5473 .read = tracing_total_entries_read,
5474 .llseek = generic_file_llseek,
5475 .release = tracing_release_generic_tr,
5478 static const struct file_operations tracing_free_buffer_fops = {
5479 .open = tracing_open_generic_tr,
5480 .write = tracing_free_buffer_write,
5481 .release = tracing_free_buffer_release,
5484 static const struct file_operations tracing_mark_fops = {
5485 .open = tracing_open_generic_tr,
5486 .write = tracing_mark_write,
5487 .llseek = generic_file_llseek,
5488 .release = tracing_release_generic_tr,
5491 static const struct file_operations trace_clock_fops = {
5492 .open = tracing_clock_open,
5494 .llseek = seq_lseek,
5495 .release = tracing_single_release_tr,
5496 .write = tracing_clock_write,
5499 #ifdef CONFIG_TRACER_SNAPSHOT
5500 static const struct file_operations snapshot_fops = {
5501 .open = tracing_snapshot_open,
5503 .write = tracing_snapshot_write,
5504 .llseek = tracing_lseek,
5505 .release = tracing_snapshot_release,
5508 static const struct file_operations snapshot_raw_fops = {
5509 .open = snapshot_raw_open,
5510 .read = tracing_buffers_read,
5511 .release = tracing_buffers_release,
5512 .splice_read = tracing_buffers_splice_read,
5513 .llseek = no_llseek,
5516 #endif /* CONFIG_TRACER_SNAPSHOT */
5518 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5520 struct trace_array *tr = inode->i_private;
5521 struct ftrace_buffer_info *info;
5524 if (tracing_disabled)
5527 if (trace_array_get(tr) < 0)
5530 info = kzalloc(sizeof(*info), GFP_KERNEL);
5532 trace_array_put(tr);
5536 mutex_lock(&trace_types_lock);
5539 info->iter.cpu_file = tracing_get_cpu(inode);
5540 info->iter.trace = tr->current_trace;
5541 info->iter.trace_buffer = &tr->trace_buffer;
5543 /* Force reading ring buffer for first read */
5544 info->read = (unsigned int)-1;
5546 filp->private_data = info;
5548 tr->current_trace->ref++;
5550 mutex_unlock(&trace_types_lock);
5552 ret = nonseekable_open(inode, filp);
5554 trace_array_put(tr);
5560 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5562 struct ftrace_buffer_info *info = filp->private_data;
5563 struct trace_iterator *iter = &info->iter;
5565 return trace_poll(iter, filp, poll_table);
5569 tracing_buffers_read(struct file *filp, char __user *ubuf,
5570 size_t count, loff_t *ppos)
5572 struct ftrace_buffer_info *info = filp->private_data;
5573 struct trace_iterator *iter = &info->iter;
5580 #ifdef CONFIG_TRACER_MAX_TRACE
5581 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5586 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5591 /* Do we have previous read data to read? */
5592 if (info->read < PAGE_SIZE)
5596 trace_access_lock(iter->cpu_file);
5597 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5601 trace_access_unlock(iter->cpu_file);
5604 if (trace_empty(iter)) {
5605 if ((filp->f_flags & O_NONBLOCK))
5608 ret = wait_on_pipe(iter, false);
5619 size = PAGE_SIZE - info->read;
5623 ret = copy_to_user(ubuf, info->spare + info->read, size);
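/*
 * Reads from "per_cpu/cpuN/trace_pipe_raw" (backed by the code above)
 * return raw, binary ring-buffer pages rather than formatted text, one
 * PAGE_SIZE chunk at a time; user-space tools such as trace-cmd typically
 * consume this interface.
 */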
5635 static int tracing_buffers_release(struct inode *inode, struct file *file)
5637 struct ftrace_buffer_info *info = file->private_data;
5638 struct trace_iterator *iter = &info->iter;
5640 mutex_lock(&trace_types_lock);
5642 iter->tr->current_trace->ref--;
5644 __trace_array_put(iter->tr);
5647 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5650 mutex_unlock(&trace_types_lock);
5656 struct ring_buffer *buffer;
5661 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5662 struct pipe_buffer *buf)
5664 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5669 ring_buffer_free_read_page(ref->buffer, ref->page);
5674 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5675 struct pipe_buffer *buf)
5677 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5682 /* Pipe buffer operations for a buffer. */
5683 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5685 .confirm = generic_pipe_buf_confirm,
5686 .release = buffer_pipe_buf_release,
5687 .steal = generic_pipe_buf_steal,
5688 .get = buffer_pipe_buf_get,
5692 * Callback from splice_to_pipe(), if we need to release some pages
5693 * at the end of the spd in case we errored out while filling the pipe.
5695 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5697 struct buffer_ref *ref =
5698 (struct buffer_ref *)spd->partial[i].private;
5703 ring_buffer_free_read_page(ref->buffer, ref->page);
5705 spd->partial[i].private = 0;
5709 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5710 struct pipe_inode_info *pipe, size_t len,
5713 struct ftrace_buffer_info *info = file->private_data;
5714 struct trace_iterator *iter = &info->iter;
5715 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5716 struct page *pages_def[PIPE_DEF_BUFFERS];
5717 struct splice_pipe_desc spd = {
5719 .partial = partial_def,
5720 .nr_pages_max = PIPE_DEF_BUFFERS,
5722 .ops = &buffer_pipe_buf_ops,
5723 .spd_release = buffer_spd_release,
5725 struct buffer_ref *ref;
5726 int entries, size, i;
5729 #ifdef CONFIG_TRACER_MAX_TRACE
5730 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5734 if (splice_grow_spd(pipe, &spd))
5737 if (*ppos & (PAGE_SIZE - 1))
5740 if (len & (PAGE_SIZE - 1)) {
5741 if (len < PAGE_SIZE)
5747 trace_access_lock(iter->cpu_file);
5748 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5750 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5754 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5761 ref->buffer = iter->trace_buffer->buffer;
5762 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5769 r = ring_buffer_read_page(ref->buffer, &ref->page,
5770 len, iter->cpu_file, 1);
5772 ring_buffer_free_read_page(ref->buffer, ref->page);
5778 * zero out any left over data, this is going to user land.
5781 size = ring_buffer_page_len(ref->page);
5782 if (size < PAGE_SIZE)
5783 memset(ref->page + size, 0, PAGE_SIZE - size);
5785 page = virt_to_page(ref->page);
5787 spd.pages[i] = page;
5788 spd.partial[i].len = PAGE_SIZE;
5789 spd.partial[i].offset = 0;
5790 spd.partial[i].private = (unsigned long)ref;
5794 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5797 trace_access_unlock(iter->cpu_file);
5800 /* did we read anything? */
5801 if (!spd.nr_pages) {
5805 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5808 ret = wait_on_pipe(iter, true);
5815 ret = splice_to_pipe(pipe, &spd);
5816 splice_shrink_spd(&spd);
5821 static const struct file_operations tracing_buffers_fops = {
5822 .open = tracing_buffers_open,
5823 .read = tracing_buffers_read,
5824 .poll = tracing_buffers_poll,
5825 .release = tracing_buffers_release,
5826 .splice_read = tracing_buffers_splice_read,
5827 .llseek = no_llseek,
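/*
 * splice() on trace_pipe_raw (tracing_buffers_splice_read() above) moves
 * whole ring-buffer pages into the pipe without copying; both the file
 * offset and the requested length must be page aligned, as enforced by the
 * checks at the top of that function.
 */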
5831 tracing_stats_read(struct file *filp, char __user *ubuf,
5832 size_t count, loff_t *ppos)
5834 struct inode *inode = file_inode(filp);
5835 struct trace_array *tr = inode->i_private;
5836 struct trace_buffer *trace_buf = &tr->trace_buffer;
5837 int cpu = tracing_get_cpu(inode);
5838 struct trace_seq *s;
5840 unsigned long long t;
5841 unsigned long usec_rem;
5843 s = kmalloc(sizeof(*s), GFP_KERNEL);
5849 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5850 trace_seq_printf(s, "entries: %ld\n", cnt);
5852 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5853 trace_seq_printf(s, "overrun: %ld\n", cnt);
5855 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5856 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5858 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5859 trace_seq_printf(s, "bytes: %ld\n", cnt);
5861 if (trace_clocks[tr->clock_id].in_ns) {
5862 /* local or global for trace_clock */
5863 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5864 usec_rem = do_div(t, USEC_PER_SEC);
5865 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5868 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5869 usec_rem = do_div(t, USEC_PER_SEC);
5870 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5872 /* counter or tsc mode for trace_clock */
5873 trace_seq_printf(s, "oldest event ts: %llu\n",
5874 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5876 trace_seq_printf(s, "now ts: %llu\n",
5877 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5880 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5881 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5883 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5884 trace_seq_printf(s, "read events: %ld\n", cnt);
5886 count = simple_read_from_buffer(ubuf, count, ppos,
5887 s->buffer, trace_seq_used(s));
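/*
 * Example output of "per_cpu/cpu0/stats" as built above (values are
 * illustrative only):
 *
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 5412
 *   oldest event ts:  2245.800239
 *   now ts:  2248.132254
 *   dropped events: 0
 *   read events: 129
 */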
5894 static const struct file_operations tracing_stats_fops = {
5895 .open = tracing_open_generic_tr,
5896 .read = tracing_stats_read,
5897 .llseek = generic_file_llseek,
5898 .release = tracing_release_generic_tr,
5901 #ifdef CONFIG_DYNAMIC_FTRACE
5903 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5909 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5910 size_t cnt, loff_t *ppos)
5912 static char ftrace_dyn_info_buffer[1024];
5913 static DEFINE_MUTEX(dyn_info_mutex);
5914 unsigned long *p = filp->private_data;
5915 char *buf = ftrace_dyn_info_buffer;
5916 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5919 mutex_lock(&dyn_info_mutex);
5920 r = sprintf(buf, "%ld ", *p);
5922 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5925 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5927 mutex_unlock(&dyn_info_mutex);
5932 static const struct file_operations tracing_dyn_info_fops = {
5933 .open = tracing_open_generic,
5934 .read = tracing_read_dyn_info,
5935 .llseek = generic_file_llseek,
5937 #endif /* CONFIG_DYNAMIC_FTRACE */
5939 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5941 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5947 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5949 unsigned long *count = (long *)data;
5961 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5962 struct ftrace_probe_ops *ops, void *data)
5964 long count = (long)data;
5966 seq_printf(m, "%ps:", (void *)ip);
5968 seq_puts(m, "snapshot");
5971 seq_puts(m, ":unlimited\n");
5973 seq_printf(m, ":count=%ld\n", count);
5978 static struct ftrace_probe_ops snapshot_probe_ops = {
5979 .func = ftrace_snapshot,
5980 .print = ftrace_snapshot_print,
5983 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5984 .func = ftrace_count_snapshot,
5985 .print = ftrace_snapshot_print,
5989 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5990 char *glob, char *cmd, char *param, int enable)
5992 struct ftrace_probe_ops *ops;
5993 void *count = (void *)-1;
5997 /* hash funcs only work with set_ftrace_filter */
6001 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6003 if (glob[0] == '!') {
6004 unregister_ftrace_function_probe_func(glob+1, ops);
6011 number = strsep(&param, ":");
6013 if (!strlen(number))
6017 * We use the callback data field (which is a pointer)
6020 ret = kstrtoul(number, 0, (unsigned long *)&count);
6025 ret = register_ftrace_function_probe(glob, ops, count);
6028 alloc_snapshot(&global_trace);
6030 return ret < 0 ? ret : 0;
6033 static struct ftrace_func_command ftrace_snapshot_cmd = {
6035 .func = ftrace_trace_snapshot_callback,
6038 static __init int register_snapshot_cmd(void)
6040 return register_ftrace_command(&ftrace_snapshot_cmd);
6043 static inline __init int register_snapshot_cmd(void) { return 0; }
6044 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
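/*
 * With CONFIG_TRACER_SNAPSHOT and CONFIG_DYNAMIC_FTRACE, the command
 * registered above hooks the snapshot mechanism into set_ftrace_filter,
 * e.g. (the function name is only an example):
 *
 *   # echo 'do_fork:snapshot' > set_ftrace_filter     # snapshot on hit
 *   # echo 'do_fork:snapshot:5' > set_ftrace_filter   # at most 5 times
 *   # echo '!do_fork:snapshot' > set_ftrace_filter    # remove the probe
 */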
6046 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6048 if (WARN_ON(!tr->dir))
6049 return ERR_PTR(-ENODEV);
6051 /* Top directory uses NULL as the parent */
6052 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6055 /* All sub buffers have a descriptor */
6059 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6061 struct dentry *d_tracer;
6064 return tr->percpu_dir;
6066 d_tracer = tracing_get_dentry(tr);
6067 if (IS_ERR(d_tracer))
6070 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6072 WARN_ONCE(!tr->percpu_dir,
6073 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6075 return tr->percpu_dir;
6078 static struct dentry *
6079 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6080 void *data, long cpu, const struct file_operations *fops)
6082 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6084 if (ret) /* See tracing_get_cpu() */
6085 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6090 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6092 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6093 struct dentry *d_cpu;
6094 char cpu_dir[30]; /* 30 characters should be more than enough */
6099 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6100 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6102 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
6106 /* per cpu trace_pipe */
6107 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6108 tr, cpu, &tracing_pipe_fops);
6111 trace_create_cpu_file("trace", 0644, d_cpu,
6112 tr, cpu, &tracing_fops);
6114 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6115 tr, cpu, &tracing_buffers_fops);
6117 trace_create_cpu_file("stats", 0444, d_cpu,
6118 tr, cpu, &tracing_stats_fops);
6120 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6121 tr, cpu, &tracing_entries_fops);
6123 #ifdef CONFIG_TRACER_SNAPSHOT
6124 trace_create_cpu_file("snapshot", 0644, d_cpu,
6125 tr, cpu, &snapshot_fops);
6127 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6128 tr, cpu, &snapshot_raw_fops);
6132 #ifdef CONFIG_FTRACE_SELFTEST
6133 /* Let selftest have access to static functions in this file */
6134 #include "trace_selftest.c"
6138 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6141 struct trace_option_dentry *topt = filp->private_data;
6144 if (topt->flags->val & topt->opt->bit)
6149 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6153 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6156 struct trace_option_dentry *topt = filp->private_data;
6160 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6164 if (val != 0 && val != 1)
6167 if (!!(topt->flags->val & topt->opt->bit) != val) {
6168 mutex_lock(&trace_types_lock);
6169 ret = __set_tracer_option(topt->tr, topt->flags,
6171 mutex_unlock(&trace_types_lock);
6182 static const struct file_operations trace_options_fops = {
6183 .open = tracing_open_generic,
6184 .read = trace_options_read,
6185 .write = trace_options_write,
6186 .llseek = generic_file_llseek,
6190 * In order to pass in both the trace_array descriptor as well as the index
6191 * to the flag that the trace option file represents, the trace_array
6192 * has a character array of trace_flags_index[], which holds the index
6193 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6194 * The address of this character array is passed to the flag option file
6195 * read/write callbacks.
6197 * In order to extract both the index and the trace_array descriptor,
6198 * get_tr_index() uses the following algorithm.
6202 * As the pointer itself contains the address of the index (remember that
 * index[1] == 1), dereferencing it yields the index value.
6205 * Then to get the trace_array descriptor, by subtracting that index
6206 * from the ptr, we get to the start of the index itself.
6208 * ptr - idx == &index[0]
6210 * Then a simple container_of() from that pointer gets us to the
6211 * trace_array descriptor.
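/*
 * Concrete example of the scheme described above: if the option file for
 * flag 3 was created with data == &tr->trace_flags_index[3], then
 * *data == 3, data - 3 == &tr->trace_flags_index[0], and container_of()
 * on that address recovers the enclosing struct trace_array.
 */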
6213 static void get_tr_index(void *data, struct trace_array **ptr,
6214 unsigned int *pindex)
6216 *pindex = *(unsigned char *)data;
6218 *ptr = container_of(data - *pindex, struct trace_array,
6223 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6226 void *tr_index = filp->private_data;
6227 struct trace_array *tr;
6231 get_tr_index(tr_index, &tr, &index);
6233 if (tr->trace_flags & (1 << index))
6238 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6242 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6245 void *tr_index = filp->private_data;
6246 struct trace_array *tr;
6251 get_tr_index(tr_index, &tr, &index);
6253 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6257 if (val != 0 && val != 1)
6260 mutex_lock(&trace_types_lock);
6261 ret = set_tracer_flag(tr, 1 << index, val);
6262 mutex_unlock(&trace_types_lock);
6272 static const struct file_operations trace_options_core_fops = {
6273 .open = tracing_open_generic,
6274 .read = trace_options_core_read,
6275 .write = trace_options_core_write,
6276 .llseek = generic_file_llseek,
6279 struct dentry *trace_create_file(const char *name,
6281 struct dentry *parent,
6283 const struct file_operations *fops)
6287 ret = tracefs_create_file(name, mode, parent, data, fops);
6289 pr_warning("Could not create tracefs '%s' entry\n", name);
6295 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6297 struct dentry *d_tracer;
6302 d_tracer = tracing_get_dentry(tr);
6303 if (IS_ERR(d_tracer))
6306 tr->options = tracefs_create_dir("options", d_tracer);
6308 pr_warning("Could not create tracefs directory 'options'\n");
6316 create_trace_option_file(struct trace_array *tr,
6317 struct trace_option_dentry *topt,
6318 struct tracer_flags *flags,
6319 struct tracer_opt *opt)
6321 struct dentry *t_options;
6323 t_options = trace_options_init_dentry(tr);
6327 topt->flags = flags;
6331 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6332 &trace_options_fops);
6337 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6339 struct trace_option_dentry *topts;
6340 struct trace_options *tr_topts;
6341 struct tracer_flags *flags;
6342 struct tracer_opt *opts;
6349 flags = tracer->flags;
6351 if (!flags || !flags->opts)
6355 * If this is an instance, only create flags for tracers
6356 * the instance may have.
6358 if (!trace_ok_for_array(tracer, tr))
6361 for (i = 0; i < tr->nr_topts; i++) {
6363 * Check if these flags have already been added.
6364 * Some tracers share flags.
6366 if (tr->topts[i].tracer->flags == tracer->flags)
6372 for (cnt = 0; opts[cnt].name; cnt++)
6375 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6379 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6386 tr->topts = tr_topts;
6387 tr->topts[tr->nr_topts].tracer = tracer;
6388 tr->topts[tr->nr_topts].topts = topts;
6391 for (cnt = 0; opts[cnt].name; cnt++) {
6392 create_trace_option_file(tr, &topts[cnt], flags,
6394 WARN_ONCE(topts[cnt].entry == NULL,
6395 "Failed to create trace option: %s",
6400 static struct dentry *
6401 create_trace_option_core_file(struct trace_array *tr,
6402 const char *option, long index)
6404 struct dentry *t_options;
6406 t_options = trace_options_init_dentry(tr);
6410 return trace_create_file(option, 0644, t_options,
6411 (void *)&tr->trace_flags_index[index],
6412 &trace_options_core_fops);
6415 static void create_trace_options_dir(struct trace_array *tr)
6417 struct dentry *t_options;
6418 bool top_level = tr == &global_trace;
6421 t_options = trace_options_init_dentry(tr);
6425 for (i = 0; trace_options[i]; i++) {
6427 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6428 create_trace_option_core_file(tr, trace_options[i], i);
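/*
 * Each flag ends up as a boolean file under "options/", e.g. (using the
 * long-standing "print-parent" option as an example):
 *
 *   # echo 0 > options/print-parent
 *   # echo 1 > options/print-parent
 */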
6433 rb_simple_read(struct file *filp, char __user *ubuf,
6434 size_t cnt, loff_t *ppos)
6436 struct trace_array *tr = filp->private_data;
6440 r = tracer_tracing_is_on(tr);
6441 r = sprintf(buf, "%d\n", r);
6443 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6447 rb_simple_write(struct file *filp, const char __user *ubuf,
6448 size_t cnt, loff_t *ppos)
6450 struct trace_array *tr = filp->private_data;
6451 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6455 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6460 mutex_lock(&trace_types_lock);
6462 tracer_tracing_on(tr);
6463 if (tr->current_trace->start)
6464 tr->current_trace->start(tr);
6466 tracer_tracing_off(tr);
6467 if (tr->current_trace->stop)
6468 tr->current_trace->stop(tr);
6470 mutex_unlock(&trace_types_lock);
6478 static const struct file_operations rb_simple_fops = {
6479 .open = tracing_open_generic_tr,
6480 .read = rb_simple_read,
6481 .write = rb_simple_write,
6482 .release = tracing_release_generic_tr,
6483 .llseek = default_llseek,
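/*
 * "tracing_on" is backed by rb_simple_read()/rb_simple_write() above:
 *
 *   # echo 0 > tracing_on    # stop writing to the ring buffer
 *   # echo 1 > tracing_on    # resume
 *   # cat tracing_on
 */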
6486 struct dentry *trace_instance_dir;
6489 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6492 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6494 enum ring_buffer_flags rb_flags;
6496 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6500 buf->buffer = ring_buffer_alloc(size, rb_flags);
6504 buf->data = alloc_percpu(struct trace_array_cpu);
6506 ring_buffer_free(buf->buffer);
6510 /* Allocate the first page for all buffers */
6511 set_buffer_entries(&tr->trace_buffer,
6512 ring_buffer_size(tr->trace_buffer.buffer, 0));
6517 static int allocate_trace_buffers(struct trace_array *tr, int size)
6521 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6525 #ifdef CONFIG_TRACER_MAX_TRACE
6526 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6527 allocate_snapshot ? size : 1);
6529 ring_buffer_free(tr->trace_buffer.buffer);
6530 free_percpu(tr->trace_buffer.data);
6533 tr->allocated_snapshot = allocate_snapshot;
6536 * Only the top level trace array gets its snapshot allocated
6537 * from the kernel command line.
6539 allocate_snapshot = false;
6544 static void free_trace_buffer(struct trace_buffer *buf)
6547 ring_buffer_free(buf->buffer);
6549 free_percpu(buf->data);
6554 static void free_trace_buffers(struct trace_array *tr)
6559 free_trace_buffer(&tr->trace_buffer);
6561 #ifdef CONFIG_TRACER_MAX_TRACE
6562 free_trace_buffer(&tr->max_buffer);
6566 static void init_trace_flags_index(struct trace_array *tr)
6570 /* Used by the trace options files */
6571 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6572 tr->trace_flags_index[i] = i;
6575 static void __update_tracer_options(struct trace_array *tr)
6579 for (t = trace_types; t; t = t->next)
6580 add_tracer_options(tr, t);
6583 static void update_tracer_options(struct trace_array *tr)
6585 mutex_lock(&trace_types_lock);
6586 __update_tracer_options(tr);
6587 mutex_unlock(&trace_types_lock);
6590 static int instance_mkdir(const char *name)
6592 struct trace_array *tr;
6595 mutex_lock(&trace_types_lock);
6598 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6599 if (tr->name && strcmp(tr->name, name) == 0)
6604 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6608 tr->name = kstrdup(name, GFP_KERNEL);
6612 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6615 tr->trace_flags = global_trace.trace_flags;
6617 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6619 raw_spin_lock_init(&tr->start_lock);
6621 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6623 tr->current_trace = &nop_trace;
6625 INIT_LIST_HEAD(&tr->systems);
6626 INIT_LIST_HEAD(&tr->events);
6628 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6631 tr->dir = tracefs_create_dir(name, trace_instance_dir);
6635 ret = event_trace_add_tracer(tr->dir, tr);
6637 tracefs_remove_recursive(tr->dir);
6641 init_tracer_tracefs(tr, tr->dir);
6642 init_trace_flags_index(tr);
6643 __update_tracer_options(tr);
6645 list_add(&tr->list, &ftrace_trace_arrays);
6647 mutex_unlock(&trace_types_lock);
6652 free_trace_buffers(tr);
6653 free_cpumask_var(tr->tracing_cpumask);
6658 mutex_unlock(&trace_types_lock);
6664 static int instance_rmdir(const char *name)
6666 struct trace_array *tr;
6671 mutex_lock(&trace_types_lock);
6674 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6675 if (tr->name && strcmp(tr->name, name) == 0) {
6684 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6687 list_del(&tr->list);
6689 tracing_set_nop(tr);
6690 event_trace_del_tracer(tr);
6691 ftrace_destroy_function_files(tr);
6692 debugfs_remove_recursive(tr->dir);
6693 free_trace_buffers(tr);
6695 for (i = 0; i < tr->nr_topts; i++) {
6696 kfree(tr->topts[i].topts);
6706 mutex_unlock(&trace_types_lock);
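/*
 * Instances are managed purely through the filesystem (assuming the usual
 * tracefs mount point):
 *
 *   # mkdir /sys/kernel/tracing/instances/foo    # instance_mkdir()
 *   # rmdir /sys/kernel/tracing/instances/foo    # instance_rmdir()
 *
 * Each instance gets its own ring buffers and event files; removal fails
 * while the instance is still referenced (see the tr->ref check above).
 */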
6711 static __init void create_trace_instances(struct dentry *d_tracer)
6713 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6716 if (WARN_ON(!trace_instance_dir))
6721 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6725 trace_create_file("available_tracers", 0444, d_tracer,
6726 tr, &show_traces_fops);
6728 trace_create_file("current_tracer", 0644, d_tracer,
6729 tr, &set_tracer_fops);
6731 trace_create_file("tracing_cpumask", 0644, d_tracer,
6732 tr, &tracing_cpumask_fops);
6734 trace_create_file("trace_options", 0644, d_tracer,
6735 tr, &tracing_iter_fops);
6737 trace_create_file("trace", 0644, d_tracer,
6740 trace_create_file("trace_pipe", 0444, d_tracer,
6741 tr, &tracing_pipe_fops);
6743 trace_create_file("buffer_size_kb", 0644, d_tracer,
6744 tr, &tracing_entries_fops);
6746 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6747 tr, &tracing_total_entries_fops);
6749 trace_create_file("free_buffer", 0200, d_tracer,
6750 tr, &tracing_free_buffer_fops);
6752 trace_create_file("trace_marker", 0220, d_tracer,
6753 tr, &tracing_mark_fops);
6755 trace_create_file("trace_clock", 0644, d_tracer, tr,
6758 trace_create_file("tracing_on", 0644, d_tracer,
6759 tr, &rb_simple_fops);
6761 create_trace_options_dir(tr);
6763 #ifdef CONFIG_TRACER_MAX_TRACE
6764 trace_create_file("tracing_max_latency", 0644, d_tracer,
6765 &tr->max_latency, &tracing_max_lat_fops);
6768 if (ftrace_create_function_files(tr, d_tracer))
6769 WARN(1, "Could not allocate function filter files");
6771 #ifdef CONFIG_TRACER_SNAPSHOT
6772 trace_create_file("snapshot", 0644, d_tracer,
6773 tr, &snapshot_fops);
6776 for_each_tracing_cpu(cpu)
6777 tracing_init_tracefs_percpu(tr, cpu);
6781 static struct vfsmount *trace_automount(void *ignore)
6783 struct vfsmount *mnt;
6784 struct file_system_type *type;
6787 * To maintain backward compatibility for tools that mount
6788 * debugfs to get to the tracing facility, tracefs is automatically
6789 * mounted to the debugfs/tracing directory.
6791 type = get_fs_type("tracefs");
6794 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6795 put_filesystem(type);
6804 * tracing_init_dentry - initialize top level trace array
6806 * This is called when creating files or directories in the tracing
6807 * directory. It is called via fs_initcall() by any of the boot up code
6808 * and expects to return the dentry of the top level tracing directory.
6810 struct dentry *tracing_init_dentry(void)
6812 struct trace_array *tr = &global_trace;
6814 /* The top level trace array uses NULL as parent */
6818 if (WARN_ON(!debugfs_initialized()))
6819 return ERR_PTR(-ENODEV);
6822 * As there may still be users that expect the tracing
6823 * files to exist in debugfs/tracing, we must automount
6824 * the tracefs file system there, so older tools still
6825 * work with the newer kernel.
6827 tr->dir = debugfs_create_automount("tracing", NULL,
6828 trace_automount, NULL);
6830 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6831 return ERR_PTR(-ENOMEM);
6837 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6838 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6840 static void __init trace_enum_init(void)
6844 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6845 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6848 #ifdef CONFIG_MODULES
6849 static void trace_module_add_enums(struct module *mod)
6851 if (!mod->num_trace_enums)
6855 * Modules with bad taint do not have events created, do
6856 * not bother with enums either.
6858 if (trace_module_has_bad_taint(mod))
6861 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6864 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6865 static void trace_module_remove_enums(struct module *mod)
6867 union trace_enum_map_item *map;
6868 union trace_enum_map_item **last = &trace_enum_maps;
6870 if (!mod->num_trace_enums)
6873 mutex_lock(&trace_enum_mutex);
6875 map = trace_enum_maps;
6878 if (map->head.mod == mod)
6880 map = trace_enum_jmp_to_tail(map);
6881 last = &map->tail.next;
6882 map = map->tail.next;
6887 *last = trace_enum_jmp_to_tail(map)->tail.next;
6890 mutex_unlock(&trace_enum_mutex);
6893 static inline void trace_module_remove_enums(struct module *mod) { }
6894 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6896 static int trace_module_notify(struct notifier_block *self,
6897 unsigned long val, void *data)
6899 struct module *mod = data;
6902 case MODULE_STATE_COMING:
6903 trace_module_add_enums(mod);
6905 case MODULE_STATE_GOING:
6906 trace_module_remove_enums(mod);
6913 static struct notifier_block trace_module_nb = {
6914 .notifier_call = trace_module_notify,
6917 #endif /* CONFIG_MODULES */
6919 static __init int tracer_init_tracefs(void)
6921 struct dentry *d_tracer;
6923 trace_access_lock_init();
6925 d_tracer = tracing_init_dentry();
6926 if (IS_ERR(d_tracer))
6929 init_tracer_tracefs(&global_trace, d_tracer);
6931 trace_create_file("tracing_thresh", 0644, d_tracer,
6932 &global_trace, &tracing_thresh_fops);
6934 trace_create_file("README", 0444, d_tracer,
6935 NULL, &tracing_readme_fops);
6937 trace_create_file("saved_cmdlines", 0444, d_tracer,
6938 NULL, &tracing_saved_cmdlines_fops);
6940 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6941 NULL, &tracing_saved_cmdlines_size_fops);
6945 trace_create_enum_file(d_tracer);
6947 #ifdef CONFIG_MODULES
6948 register_module_notifier(&trace_module_nb);
6951 #ifdef CONFIG_DYNAMIC_FTRACE
6952 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6953 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6956 create_trace_instances(d_tracer);
6958 update_tracer_options(&global_trace);
6963 static int trace_panic_handler(struct notifier_block *this,
6964 unsigned long event, void *unused)
6966 if (ftrace_dump_on_oops)
6967 ftrace_dump(ftrace_dump_on_oops);
6971 static struct notifier_block trace_panic_notifier = {
6972 .notifier_call = trace_panic_handler,
6974 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6977 static int trace_die_handler(struct notifier_block *self,
6983 if (ftrace_dump_on_oops)
6984 ftrace_dump(ftrace_dump_on_oops);
6992 static struct notifier_block trace_die_notifier = {
6993 .notifier_call = trace_die_handler,
6998 * printk is set to max of 1024, we really don't need it that big.
6999 * Nothing should be printing 1000 characters anyway.
7001 #define TRACE_MAX_PRINT 1000
7004 * Define here KERN_TRACE so that we have one place to modify
7005 * it if we decide to change what log level the ftrace dump
7008 #define KERN_TRACE KERN_EMERG
7011 trace_printk_seq(struct trace_seq *s)
7013 /* Probably should print a warning here. */
7014 if (s->seq.len >= TRACE_MAX_PRINT)
7015 s->seq.len = TRACE_MAX_PRINT;
7018 * More paranoid code. Although the buffer size is set to
7019 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7020 * an extra layer of protection.
7022 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7023 s->seq.len = s->seq.size - 1;
7025 /* should be zero ended, but we are paranoid. */
7026 s->buffer[s->seq.len] = 0;
7028 printk(KERN_TRACE "%s", s->buffer);
7033 void trace_init_global_iter(struct trace_iterator *iter)
7035 iter->tr = &global_trace;
7036 iter->trace = iter->tr->current_trace;
7037 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7038 iter->trace_buffer = &global_trace.trace_buffer;
7040 if (iter->trace && iter->trace->open)
7041 iter->trace->open(iter);
7043 /* Annotate start of buffers if we had overruns */
7044 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7045 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7047 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7048 if (trace_clocks[iter->tr->clock_id].in_ns)
7049 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7052 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7054 /* use static because iter can be a bit big for the stack */
7055 static struct trace_iterator iter;
7056 static atomic_t dump_running;
7057 struct trace_array *tr = &global_trace;
7058 unsigned int old_userobj;
7059 unsigned long flags;
7062 /* Only allow one dump user at a time. */
7063 if (atomic_inc_return(&dump_running) != 1) {
7064 atomic_dec(&dump_running);
7069 * Always turn off tracing when we dump.
7070 * We don't need to show trace output of what happens
7071 * between multiple crashes.
7073 * If the user does a sysrq-z, then they can re-enable
7074 * tracing with echo 1 > tracing_on.
7078 local_irq_save(flags);
7080 /* Simulate the iterator */
7081 trace_init_global_iter(&iter);
7083 for_each_tracing_cpu(cpu) {
7084 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7087 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7089 /* don't look at user memory in panic mode */
7090 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7092 switch (oops_dump_mode) {
7094 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7097 iter.cpu_file = raw_smp_processor_id();
7102 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7103 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7106 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7108 /* Did function tracer already get disabled? */
7109 if (ftrace_is_dead()) {
7110 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7111 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7115 * We need to stop all tracing on all CPUs to read the
7116 * next buffer. This is a bit expensive, but is
7117 * not done often. We fill in all that we can read,
7118 * and then release the locks again.
7121 while (!trace_empty(&iter)) {
7124 printk(KERN_TRACE "---------------------------------\n");
7128 /* reset all but tr, trace, and overruns */
7129 memset(&iter.seq, 0,
7130 sizeof(struct trace_iterator) -
7131 offsetof(struct trace_iterator, seq));
7132 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7135 if (trace_find_next_entry_inc(&iter) != NULL) {
7138 ret = print_trace_line(&iter);
7139 if (ret != TRACE_TYPE_NO_CONSUME)
7140 trace_consume(&iter);
7142 touch_nmi_watchdog();
7144 trace_printk_seq(&iter.seq);
7148 printk(KERN_TRACE " (ftrace buffer empty)\n");
7150 printk(KERN_TRACE "---------------------------------\n");
7153 tr->trace_flags |= old_userobj;
7155 for_each_tracing_cpu(cpu) {
7156 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7158 atomic_dec(&dump_running);
7159 local_irq_restore(flags);
7161 EXPORT_SYMBOL_GPL(ftrace_dump);
7163 __init static int tracer_alloc_buffers(void)
7169 * Make sure we don't accidentally add more trace options
7170 * than we have bits for.
7172 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7174 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7177 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7178 goto out_free_buffer_mask;
7180 /* Only allocate trace_printk buffers if a trace_printk exists */
7181 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7182 /* Must be called before global_trace.buffer is allocated */
7183 trace_printk_init_buffers();
7185 /* To save memory, keep the ring buffer size to its minimum */
7186 if (ring_buffer_expanded)
7187 ring_buf_size = trace_buf_size;
7191 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7192 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7194 raw_spin_lock_init(&global_trace.start_lock);
7196 /* Used for event triggers */
7197 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7199 goto out_free_cpumask;
7201 if (trace_create_savedcmd() < 0)
7202 goto out_free_temp_buffer;
7204 /* TODO: make the number of buffers hot pluggable with CPUs */
7205 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7206 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7208 goto out_free_savedcmd;
7211 if (global_trace.buffer_disabled)
7214 if (trace_boot_clock) {
7215 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7217 pr_warning("Trace clock %s not defined, going back to default\n",
7222 * register_tracer() might reference current_trace, so it
7223 * needs to be set before we register anything. This is
7224 * just a bootstrap of current_trace anyway.
7226 global_trace.current_trace = &nop_trace;
7228 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7230 ftrace_init_global_array_ops(&global_trace);
7232 init_trace_flags_index(&global_trace);
7234 register_tracer(&nop_trace);
7236 /* All seems OK, enable tracing */
7237 tracing_disabled = 0;
7239 atomic_notifier_chain_register(&panic_notifier_list,
7240 &trace_panic_notifier);
7242 register_die_notifier(&trace_die_notifier);
7244 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7246 INIT_LIST_HEAD(&global_trace.systems);
7247 INIT_LIST_HEAD(&global_trace.events);
7248 list_add(&global_trace.list, &ftrace_trace_arrays);
7250 while (trace_boot_options) {
7253 option = strsep(&trace_boot_options, ",");
7254 trace_set_options(&global_trace, option);
7257 register_snapshot_cmd();
7262 free_saved_cmdlines_buffer(savedcmd);
7263 out_free_temp_buffer:
7264 ring_buffer_free(temp_buffer);
7266 free_cpumask_var(global_trace.tracing_cpumask);
7267 out_free_buffer_mask:
7268 free_cpumask_var(tracing_buffer_mask);
7273 void __init trace_init(void)
7275 if (tracepoint_printk) {
7276 tracepoint_print_iter =
7277 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7278 if (WARN_ON(!tracepoint_print_iter))
7279 tracepoint_printk = 0;
7281 tracer_alloc_buffers();
7285 __init static int clear_boot_tracer(void)
7288 * The default tracer set at boot lives in an init section.
7289 * This function is called at late_initcall time. If we did not
7290 * find the boot tracer, then clear it out, to prevent
7291 * later registration from accessing the buffer that is
7292 * about to be freed.
7294 if (!default_bootup_tracer)
7297 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7298 default_bootup_tracer);
7299 default_bootup_tracer = NULL;
7304 fs_initcall(tracer_init_tracefs);
7305 late_initcall(clear_boot_tracer);