 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE 128
17 DECLARE_RWSEM(trace_event_sem);
19 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
21 static int next_event_type = __TRACE_LAST_TYPE + 1;
23 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
25 struct trace_seq *s = &iter->seq;
26 struct trace_entry *entry = iter->ent;
27 struct bputs_entry *field;
29 trace_assign_type(field, entry);
31 trace_seq_puts(s, field->str);
33 return trace_handle_return(s);
36 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
38 struct trace_seq *s = &iter->seq;
39 struct trace_entry *entry = iter->ent;
40 struct bprint_entry *field;
42 trace_assign_type(field, entry);
44 trace_seq_bprintf(s, field->fmt, field->buf);
46 return trace_handle_return(s);
49 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
51 struct trace_seq *s = &iter->seq;
52 struct trace_entry *entry = iter->ent;
53 struct print_entry *field;
55 trace_assign_type(field, entry);
57 trace_seq_puts(s, field->buf);
59 return trace_handle_return(s);
63 trace_print_flags_seq(struct trace_seq *p, const char *delim,
65 const struct trace_print_flags *flag_array)
69 const char *ret = trace_seq_buffer_ptr(p);
72 for (i = 0; flag_array[i].name && flags; i++) {
74 mask = flag_array[i].mask;
75 if ((flags & mask) != mask)
78 str = flag_array[i].name;
81 trace_seq_puts(p, delim);
84 trace_seq_puts(p, str);
87 /* check for left over flags */
90 trace_seq_puts(p, delim);
91 trace_seq_printf(p, "0x%lx", flags);
98 EXPORT_SYMBOL(trace_print_flags_seq);
101 trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
102 const struct trace_print_flags *symbol_array)
105 const char *ret = trace_seq_buffer_ptr(p);
107 for (i = 0; symbol_array[i].name; i++) {
109 if (val != symbol_array[i].mask)
112 trace_seq_puts(p, symbol_array[i].name);
116 if (ret == (const char *)(trace_seq_buffer_ptr(p)))
117 trace_seq_printf(p, "0x%lx", val);
119 trace_seq_putc(p, 0);
123 EXPORT_SYMBOL(trace_print_symbols_seq);
125 #if BITS_PER_LONG == 32
127 trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
128 unsigned long long flags,
129 const struct trace_print_flags_u64 *flag_array)
131 unsigned long long mask;
133 const char *ret = trace_seq_buffer_ptr(p);
136 for (i = 0; flag_array[i].name && flags; i++) {
138 mask = flag_array[i].mask;
139 if ((flags & mask) != mask)
142 str = flag_array[i].name;
145 trace_seq_puts(p, delim);
148 trace_seq_puts(p, str);
151 /* check for left over flags */
154 trace_seq_puts(p, delim);
155 trace_seq_printf(p, "0x%llx", flags);
158 trace_seq_putc(p, 0);
162 EXPORT_SYMBOL(trace_print_flags_seq_u64);
165 trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
166 const struct trace_print_flags_u64 *symbol_array)
169 const char *ret = trace_seq_buffer_ptr(p);
171 for (i = 0; symbol_array[i].name; i++) {
173 if (val != symbol_array[i].mask)
176 trace_seq_puts(p, symbol_array[i].name);
180 if (ret == (const char *)(trace_seq_buffer_ptr(p)))
181 trace_seq_printf(p, "0x%llx", val);
183 trace_seq_putc(p, 0);
187 EXPORT_SYMBOL(trace_print_symbols_seq_u64);
/*
 * Render a bitmask of @bitmask_size bytes into @p as a cpumask-style list.
 * Returns a pointer to the start of the rendered string in the seq buffer.
 */
const char *
trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			unsigned int bitmask_size)
{
	const char *ret = trace_seq_buffer_ptr(p);

	/* trace_seq_bitmask() takes the number of bits, not bytes. */
	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
204 * trace_print_hex_seq - print buffer as hex sequence
205 * @p: trace seq struct to write to
206 * @buf: The buffer to print
207 * @buf_len: Length of @buf in bytes
208 * @concatenate: Print @buf as single hex string or with spacing
210 * Prints the passed buffer as a hex sequence either as a whole,
211 * single hex string if @concatenate is true or with spacing after
212 * each byte in case @concatenate is false.
215 trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
219 const char *ret = trace_seq_buffer_ptr(p);
221 for (i = 0; i < buf_len; i++)
222 trace_seq_printf(p, "%s%2.2x", concatenate || i == 0 ? "" : " ",
224 trace_seq_putc(p, 0);
228 EXPORT_SYMBOL(trace_print_hex_seq);
231 trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
234 const char *ret = trace_seq_buffer_ptr(p);
235 const char *prefix = "";
236 void *ptr = (void *)buf;
237 size_t buf_len = count * el_size;
239 trace_seq_putc(p, '{');
241 while (ptr < buf + buf_len) {
244 trace_seq_printf(p, "%s0x%x", prefix,
248 trace_seq_printf(p, "%s0x%x", prefix,
252 trace_seq_printf(p, "%s0x%x", prefix,
256 trace_seq_printf(p, "%s0x%llx", prefix,
260 trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
268 trace_seq_putc(p, '}');
269 trace_seq_putc(p, 0);
273 EXPORT_SYMBOL(trace_print_array_seq);
275 int trace_raw_output_prep(struct trace_iterator *iter,
276 struct trace_event *trace_event)
278 struct trace_event_call *event;
279 struct trace_seq *s = &iter->seq;
280 struct trace_seq *p = &iter->tmp_seq;
281 struct trace_entry *entry;
283 event = container_of(trace_event, struct trace_event_call, event);
286 if (entry->type != event->event.type) {
288 return TRACE_TYPE_UNHANDLED;
292 trace_seq_printf(s, "%s: ", trace_event_name(event));
294 return trace_handle_return(s);
296 EXPORT_SYMBOL(trace_raw_output_prep);
298 static int trace_output_raw(struct trace_iterator *iter, char *name,
299 char *fmt, va_list ap)
301 struct trace_seq *s = &iter->seq;
303 trace_seq_printf(s, "%s: ", name);
304 trace_seq_vprintf(s, fmt, ap);
306 return trace_handle_return(s);
309 int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
315 ret = trace_output_raw(iter, name, fmt, ap);
320 EXPORT_SYMBOL_GPL(trace_output_call);
#ifdef CONFIG_KRETPROBES
/*
 * Symbol lookup on a kretprobed return address resolves to the trampoline;
 * replace that misleading name with an explicit marker.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
/* Print the symbol name for @address (no offset) using @fmt. */
static void
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}
/* Print the symbol name plus offset/size for @address using @fmt. */
static void
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}
/* Width of a printed instruction pointer depends on the word size. */
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif
375 static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
376 unsigned long ip, unsigned long sym_flags)
378 struct file *file = NULL;
379 unsigned long vmstart = 0;
386 const struct vm_area_struct *vma;
388 down_read(&mm->mmap_sem);
389 vma = find_vma(mm, ip);
392 vmstart = vma->vm_start;
395 ret = trace_seq_path(s, &file->f_path);
397 trace_seq_printf(s, "[+0x%lx]",
400 up_read(&mm->mmap_sem);
402 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
403 trace_seq_printf(s, " <" IP_FMT ">", ip);
404 return !trace_seq_has_overflowed(s);
408 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
411 trace_seq_putc(s, '0');
415 if (sym_flags & TRACE_ITER_SYM_OFFSET)
416 seq_print_sym_offset(s, "%s", ip);
418 seq_print_sym_short(s, "%s", ip);
420 if (sym_flags & TRACE_ITER_SYM_ADDR)
421 trace_seq_printf(s, " <" IP_FMT ">", ip);
424 return !trace_seq_has_overflowed(s);
428 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
429 * @s: trace seq struct to write to
430 * @entry: The trace entry field from the ring buffer
432 * Prints the generic fields of irqs off, in hard or softirq, preempt
435 int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
444 nmi = entry->flags & TRACE_FLAG_NMI;
445 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
446 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
449 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
450 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
453 switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
454 TRACE_FLAG_PREEMPT_RESCHED)) {
455 case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
458 case TRACE_FLAG_NEED_RESCHED:
461 case TRACE_FLAG_PREEMPT_RESCHED:
470 (nmi && hardirq) ? 'Z' :
472 (hardirq && softirq) ? 'H' :
477 trace_seq_printf(s, "%c%c%c",
478 irqs_off, need_resched, hardsoft_irq);
480 if (entry->preempt_count)
481 trace_seq_printf(s, "%x", entry->preempt_count);
483 trace_seq_putc(s, '.');
485 return !trace_seq_has_overflowed(s);
489 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
491 char comm[TASK_COMM_LEN];
493 trace_find_cmdline(entry->pid, comm);
495 trace_seq_printf(s, "%8.8s-%-5d %3d",
496 comm, entry->pid, cpu);
498 return trace_print_lat_fmt(s, entry);
#undef MARK
#define MARK(v, s) {.val = v, .sym = s}
/* trace overhead mark */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	MARK(1000000000ULL	, '$'), /* 1 sec */
	MARK(100000000ULL	, '@'), /* 100 msec */
	MARK(10000000ULL	, '*'), /* 10 msec */
	MARK(1000000ULL		, '#'), /* 1000 usecs */
	MARK(100000ULL		, '!'), /* 100 usecs */
	MARK(10000ULL		, '+'), /* 10 usecs */
};
#undef MARK
517 char trace_find_mark(unsigned long long d)
520 int size = ARRAY_SIZE(mark);
522 for (i = 0; i < size; i++) {
527 return (i == size) ? ' ' : mark[i].sym;
531 lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
533 struct trace_array *tr = iter->tr;
534 unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
535 unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
536 unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
537 unsigned long long rel_ts = next_ts - iter->ts;
538 struct trace_seq *s = &iter->seq;
541 abs_ts = ns2usecs(abs_ts);
542 rel_ts = ns2usecs(rel_ts);
545 if (verbose && in_ns) {
546 unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
547 unsigned long abs_msec = (unsigned long)abs_ts;
548 unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
549 unsigned long rel_msec = (unsigned long)rel_ts;
552 s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
557 } else if (verbose && !in_ns) {
559 s, "[%016llx] %lld (+%lld): ",
560 iter->ts, abs_ts, rel_ts);
562 } else if (!verbose && in_ns) {
566 trace_find_mark(rel_ts * NSEC_PER_USEC));
568 } else { /* !verbose && !in_ns */
569 trace_seq_printf(s, " %4lld: ", abs_ts);
572 return !trace_seq_has_overflowed(s);
575 int trace_print_context(struct trace_iterator *iter)
577 struct trace_array *tr = iter->tr;
578 struct trace_seq *s = &iter->seq;
579 struct trace_entry *entry = iter->ent;
580 unsigned long long t;
581 unsigned long secs, usec_rem;
582 char comm[TASK_COMM_LEN];
584 trace_find_cmdline(entry->pid, comm);
586 trace_seq_printf(s, "%16s-%-5d [%03d] ",
587 comm, entry->pid, iter->cpu);
589 if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
590 trace_print_lat_fmt(s, entry);
592 if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
593 t = ns2usecs(iter->ts);
594 usec_rem = do_div(t, USEC_PER_SEC);
595 secs = (unsigned long)t;
596 trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
598 trace_seq_printf(s, " %12llu: ", iter->ts);
600 return !trace_seq_has_overflowed(s);
603 int trace_print_lat_context(struct trace_iterator *iter)
605 struct trace_array *tr = iter->tr;
606 /* trace_find_next_entry will reset ent_size */
607 int ent_size = iter->ent_size;
608 struct trace_seq *s = &iter->seq;
610 struct trace_entry *entry = iter->ent,
611 *next_entry = trace_find_next_entry(iter, NULL,
613 unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);
615 /* Restore the original ent_size */
616 iter->ent_size = ent_size;
622 char comm[TASK_COMM_LEN];
624 trace_find_cmdline(entry->pid, comm);
627 s, "%16s %5d %3d %d %08x %08lx ",
628 comm, entry->pid, iter->cpu, entry->flags,
629 entry->preempt_count, iter->idx);
631 lat_print_generic(s, entry, iter->cpu);
634 lat_print_timestamp(iter, next_ts);
636 return !trace_seq_has_overflowed(s);
639 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
641 static int task_state_char(unsigned long state)
643 int bit = state ? __ffs(state) + 1 : 0;
645 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
649 * ftrace_find_event - find a registered event
650 * @type: the type of event to look for
652 * Returns an event of type @type otherwise NULL
653 * Called with trace_event_read_lock() held.
655 struct trace_event *ftrace_find_event(int type)
657 struct trace_event *event;
660 key = type & (EVENT_HASHSIZE - 1);
662 hlist_for_each_entry(event, &event_hash[key], node) {
663 if (event->type == type)
670 static LIST_HEAD(ftrace_event_list);
672 static int trace_search_list(struct list_head **list)
674 struct trace_event *e;
675 int last = __TRACE_LAST_TYPE;
677 if (list_empty(&ftrace_event_list)) {
678 *list = &ftrace_event_list;
683 * We used up all possible max events,
684 * lets see if somebody freed one.
686 list_for_each_entry(e, &ftrace_event_list, list) {
687 if (e->type != last + 1)
692 /* Did we used up all 65 thousand events??? */
693 if ((last + 1) > TRACE_EVENT_TYPE_MAX)
700 void trace_event_read_lock(void)
702 down_read(&trace_event_sem);
705 void trace_event_read_unlock(void)
707 up_read(&trace_event_sem);
711 * register_trace_event - register output for an event type
712 * @event: the event type to register
714 * Event types are stored in a hash and this hash is used to
715 * find a way to print an event. If the @event->type is set
716 * then it will use that type, otherwise it will assign a
719 * If you assign your own type, please make sure it is added
720 * to the trace_type enum in trace.h, to avoid collisions
721 * with the dynamic types.
723 * Returns the event type number or zero on error.
725 int register_trace_event(struct trace_event *event)
730 down_write(&trace_event_sem);
735 if (WARN_ON(!event->funcs))
738 INIT_LIST_HEAD(&event->list);
741 struct list_head *list = NULL;
743 if (next_event_type > TRACE_EVENT_TYPE_MAX) {
745 event->type = trace_search_list(&list);
751 event->type = next_event_type++;
752 list = &ftrace_event_list;
755 if (WARN_ON(ftrace_find_event(event->type)))
758 list_add_tail(&event->list, list);
760 } else if (event->type > __TRACE_LAST_TYPE) {
761 printk(KERN_WARNING "Need to add type to trace.h\n");
765 /* Is this event already used */
766 if (ftrace_find_event(event->type))
770 if (event->funcs->trace == NULL)
771 event->funcs->trace = trace_nop_print;
772 if (event->funcs->raw == NULL)
773 event->funcs->raw = trace_nop_print;
774 if (event->funcs->hex == NULL)
775 event->funcs->hex = trace_nop_print;
776 if (event->funcs->binary == NULL)
777 event->funcs->binary = trace_nop_print;
779 key = event->type & (EVENT_HASHSIZE - 1);
781 hlist_add_head(&event->node, &event_hash[key]);
785 up_write(&trace_event_sem);
789 EXPORT_SYMBOL_GPL(register_trace_event);
792 * Used by module code with the trace_event_sem held for write.
794 int __unregister_trace_event(struct trace_event *event)
796 hlist_del(&event->node);
797 list_del(&event->list);
802 * unregister_trace_event - remove a no longer used event
803 * @event: the event to remove
805 int unregister_trace_event(struct trace_event *event)
807 down_write(&trace_event_sem);
808 __unregister_trace_event(event);
809 up_write(&trace_event_sem);
813 EXPORT_SYMBOL_GPL(unregister_trace_event);
819 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
820 struct trace_event *event)
822 trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
824 return trace_handle_return(&iter->seq);
828 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
829 struct trace_event *event)
831 struct ftrace_entry *field;
832 struct trace_seq *s = &iter->seq;
834 trace_assign_type(field, iter->ent);
836 seq_print_ip_sym(s, field->ip, flags);
838 if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
839 trace_seq_puts(s, " <-");
840 seq_print_ip_sym(s, field->parent_ip, flags);
843 trace_seq_putc(s, '\n');
845 return trace_handle_return(s);
848 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
849 struct trace_event *event)
851 struct ftrace_entry *field;
853 trace_assign_type(field, iter->ent);
855 trace_seq_printf(&iter->seq, "%lx %lx\n",
859 return trace_handle_return(&iter->seq);
862 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
863 struct trace_event *event)
865 struct ftrace_entry *field;
866 struct trace_seq *s = &iter->seq;
868 trace_assign_type(field, iter->ent);
870 SEQ_PUT_HEX_FIELD(s, field->ip);
871 SEQ_PUT_HEX_FIELD(s, field->parent_ip);
873 return trace_handle_return(s);
876 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
877 struct trace_event *event)
879 struct ftrace_entry *field;
880 struct trace_seq *s = &iter->seq;
882 trace_assign_type(field, iter->ent);
884 SEQ_PUT_FIELD(s, field->ip);
885 SEQ_PUT_FIELD(s, field->parent_ip);
887 return trace_handle_return(s);
890 static struct trace_event_functions trace_fn_funcs = {
891 .trace = trace_fn_trace,
894 .binary = trace_fn_bin,
897 static struct trace_event trace_fn_event = {
899 .funcs = &trace_fn_funcs,
902 /* TRACE_CTX an TRACE_WAKE */
903 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
906 struct ctx_switch_entry *field;
907 char comm[TASK_COMM_LEN];
911 trace_assign_type(field, iter->ent);
913 T = task_state_char(field->next_state);
914 S = task_state_char(field->prev_state);
915 trace_find_cmdline(field->next_pid, comm);
916 trace_seq_printf(&iter->seq,
917 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
926 return trace_handle_return(&iter->seq);
929 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
930 struct trace_event *event)
932 return trace_ctxwake_print(iter, "==>");
935 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
936 int flags, struct trace_event *event)
938 return trace_ctxwake_print(iter, " +");
941 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
943 struct ctx_switch_entry *field;
946 trace_assign_type(field, iter->ent);
949 S = task_state_char(field->prev_state);
950 T = task_state_char(field->next_state);
951 trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
960 return trace_handle_return(&iter->seq);
963 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
964 struct trace_event *event)
966 return trace_ctxwake_raw(iter, 0);
969 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
970 struct trace_event *event)
972 return trace_ctxwake_raw(iter, '+');
976 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
978 struct ctx_switch_entry *field;
979 struct trace_seq *s = &iter->seq;
982 trace_assign_type(field, iter->ent);
985 S = task_state_char(field->prev_state);
986 T = task_state_char(field->next_state);
988 SEQ_PUT_HEX_FIELD(s, field->prev_pid);
989 SEQ_PUT_HEX_FIELD(s, field->prev_prio);
990 SEQ_PUT_HEX_FIELD(s, S);
991 SEQ_PUT_HEX_FIELD(s, field->next_cpu);
992 SEQ_PUT_HEX_FIELD(s, field->next_pid);
993 SEQ_PUT_HEX_FIELD(s, field->next_prio);
994 SEQ_PUT_HEX_FIELD(s, T);
996 return trace_handle_return(s);
999 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
1000 struct trace_event *event)
1002 return trace_ctxwake_hex(iter, 0);
1005 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
1006 struct trace_event *event)
1008 return trace_ctxwake_hex(iter, '+');
1011 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
1012 int flags, struct trace_event *event)
1014 struct ctx_switch_entry *field;
1015 struct trace_seq *s = &iter->seq;
1017 trace_assign_type(field, iter->ent);
1019 SEQ_PUT_FIELD(s, field->prev_pid);
1020 SEQ_PUT_FIELD(s, field->prev_prio);
1021 SEQ_PUT_FIELD(s, field->prev_state);
1022 SEQ_PUT_FIELD(s, field->next_cpu);
1023 SEQ_PUT_FIELD(s, field->next_pid);
1024 SEQ_PUT_FIELD(s, field->next_prio);
1025 SEQ_PUT_FIELD(s, field->next_state);
1027 return trace_handle_return(s);
1030 static struct trace_event_functions trace_ctx_funcs = {
1031 .trace = trace_ctx_print,
1032 .raw = trace_ctx_raw,
1033 .hex = trace_ctx_hex,
1034 .binary = trace_ctxwake_bin,
1037 static struct trace_event trace_ctx_event = {
1039 .funcs = &trace_ctx_funcs,
1042 static struct trace_event_functions trace_wake_funcs = {
1043 .trace = trace_wake_print,
1044 .raw = trace_wake_raw,
1045 .hex = trace_wake_hex,
1046 .binary = trace_ctxwake_bin,
1049 static struct trace_event trace_wake_event = {
1051 .funcs = &trace_wake_funcs,
1056 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1057 int flags, struct trace_event *event)
1059 struct stack_entry *field;
1060 struct trace_seq *s = &iter->seq;
1064 trace_assign_type(field, iter->ent);
1065 end = (unsigned long *)((long)iter->ent + iter->ent_size);
1067 trace_seq_puts(s, "<stack trace>\n");
1069 for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
1071 if (trace_seq_has_overflowed(s))
1074 trace_seq_puts(s, " => ");
1075 seq_print_ip_sym(s, *p, flags);
1076 trace_seq_putc(s, '\n');
1079 return trace_handle_return(s);
1082 static struct trace_event_functions trace_stack_funcs = {
1083 .trace = trace_stack_print,
1086 static struct trace_event trace_stack_event = {
1087 .type = TRACE_STACK,
1088 .funcs = &trace_stack_funcs,
1091 /* TRACE_USER_STACK */
1092 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1093 int flags, struct trace_event *event)
1095 struct trace_array *tr = iter->tr;
1096 struct userstack_entry *field;
1097 struct trace_seq *s = &iter->seq;
1098 struct mm_struct *mm = NULL;
1101 trace_assign_type(field, iter->ent);
1103 trace_seq_puts(s, "<user stack trace>\n");
1105 if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
1106 struct task_struct *task;
1108 * we do the lookup on the thread group leader,
1109 * since individual threads might have already quit!
1112 task = find_task_by_vpid(field->tgid);
1114 mm = get_task_mm(task);
1118 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1119 unsigned long ip = field->caller[i];
1121 if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
1124 trace_seq_puts(s, " => ");
1127 trace_seq_puts(s, "??");
1128 trace_seq_putc(s, '\n');
1132 seq_print_user_ip(s, mm, ip, flags);
1133 trace_seq_putc(s, '\n');
1139 return trace_handle_return(s);
1142 static struct trace_event_functions trace_user_stack_funcs = {
1143 .trace = trace_user_stack_print,
1146 static struct trace_event trace_user_stack_event = {
1147 .type = TRACE_USER_STACK,
1148 .funcs = &trace_user_stack_funcs,
1152 static enum print_line_t
1153 trace_hwlat_print(struct trace_iterator *iter, int flags,
1154 struct trace_event *event)
1156 struct trace_entry *entry = iter->ent;
1157 struct trace_seq *s = &iter->seq;
1158 struct hwlat_entry *field;
1160 trace_assign_type(field, entry);
1162 trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ld.%09ld",
1165 field->outer_duration,
1166 field->timestamp.tv_sec,
1167 field->timestamp.tv_nsec);
1169 if (field->nmi_count) {
1171 * The generic sched_clock() is not NMI safe, thus
1172 * we only record the count and not the time.
1174 if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
1175 trace_seq_printf(s, " nmi-total:%llu",
1176 field->nmi_total_ts);
1177 trace_seq_printf(s, " nmi-count:%u",
1181 trace_seq_putc(s, '\n');
1183 return trace_handle_return(s);
1187 static enum print_line_t
1188 trace_hwlat_raw(struct trace_iterator *iter, int flags,
1189 struct trace_event *event)
1191 struct hwlat_entry *field;
1192 struct trace_seq *s = &iter->seq;
1194 trace_assign_type(field, iter->ent);
1196 trace_seq_printf(s, "%llu %lld %ld %09ld %u\n",
1198 field->outer_duration,
1199 field->timestamp.tv_sec,
1200 field->timestamp.tv_nsec,
1203 return trace_handle_return(s);
1206 static struct trace_event_functions trace_hwlat_funcs = {
1207 .trace = trace_hwlat_print,
1208 .raw = trace_hwlat_raw,
1211 static struct trace_event trace_hwlat_event = {
1212 .type = TRACE_HWLAT,
1213 .funcs = &trace_hwlat_funcs,
1217 static enum print_line_t
1218 trace_bputs_print(struct trace_iterator *iter, int flags,
1219 struct trace_event *event)
1221 struct trace_entry *entry = iter->ent;
1222 struct trace_seq *s = &iter->seq;
1223 struct bputs_entry *field;
1225 trace_assign_type(field, entry);
1227 seq_print_ip_sym(s, field->ip, flags);
1228 trace_seq_puts(s, ": ");
1229 trace_seq_puts(s, field->str);
1231 return trace_handle_return(s);
1235 static enum print_line_t
1236 trace_bputs_raw(struct trace_iterator *iter, int flags,
1237 struct trace_event *event)
1239 struct bputs_entry *field;
1240 struct trace_seq *s = &iter->seq;
1242 trace_assign_type(field, iter->ent);
1244 trace_seq_printf(s, ": %lx : ", field->ip);
1245 trace_seq_puts(s, field->str);
1247 return trace_handle_return(s);
1250 static struct trace_event_functions trace_bputs_funcs = {
1251 .trace = trace_bputs_print,
1252 .raw = trace_bputs_raw,
1255 static struct trace_event trace_bputs_event = {
1256 .type = TRACE_BPUTS,
1257 .funcs = &trace_bputs_funcs,
1261 static enum print_line_t
1262 trace_bprint_print(struct trace_iterator *iter, int flags,
1263 struct trace_event *event)
1265 struct trace_entry *entry = iter->ent;
1266 struct trace_seq *s = &iter->seq;
1267 struct bprint_entry *field;
1269 trace_assign_type(field, entry);
1271 seq_print_ip_sym(s, field->ip, flags);
1272 trace_seq_puts(s, ": ");
1273 trace_seq_bprintf(s, field->fmt, field->buf);
1275 return trace_handle_return(s);
1279 static enum print_line_t
1280 trace_bprint_raw(struct trace_iterator *iter, int flags,
1281 struct trace_event *event)
1283 struct bprint_entry *field;
1284 struct trace_seq *s = &iter->seq;
1286 trace_assign_type(field, iter->ent);
1288 trace_seq_printf(s, ": %lx : ", field->ip);
1289 trace_seq_bprintf(s, field->fmt, field->buf);
1291 return trace_handle_return(s);
1294 static struct trace_event_functions trace_bprint_funcs = {
1295 .trace = trace_bprint_print,
1296 .raw = trace_bprint_raw,
1299 static struct trace_event trace_bprint_event = {
1300 .type = TRACE_BPRINT,
1301 .funcs = &trace_bprint_funcs,
1305 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1306 int flags, struct trace_event *event)
1308 struct print_entry *field;
1309 struct trace_seq *s = &iter->seq;
1311 trace_assign_type(field, iter->ent);
1313 seq_print_ip_sym(s, field->ip, flags);
1314 trace_seq_printf(s, ": %s", field->buf);
1316 return trace_handle_return(s);
1319 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
1320 struct trace_event *event)
1322 struct print_entry *field;
1324 trace_assign_type(field, iter->ent);
1326 trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
1328 return trace_handle_return(&iter->seq);
1331 static struct trace_event_functions trace_print_funcs = {
1332 .trace = trace_print_print,
1333 .raw = trace_print_raw,
1336 static struct trace_event trace_print_event = {
1337 .type = TRACE_PRINT,
1338 .funcs = &trace_print_funcs,
1341 static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
1342 struct trace_event *event)
1344 struct raw_data_entry *field;
1347 trace_assign_type(field, iter->ent);
1349 trace_seq_printf(&iter->seq, "# %x buf:", field->id);
1351 for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
1352 trace_seq_printf(&iter->seq, " %02x",
1353 (unsigned char)field->buf[i]);
1355 trace_seq_putc(&iter->seq, '\n');
1357 return trace_handle_return(&iter->seq);
1360 static struct trace_event_functions trace_raw_data_funcs = {
1361 .trace = trace_raw_data,
1362 .raw = trace_raw_data,
1365 static struct trace_event trace_raw_data_event = {
1366 .type = TRACE_RAW_DATA,
1367 .funcs = &trace_raw_data_funcs,
1371 static struct trace_event *events[] __initdata = {
1376 &trace_user_stack_event,
1378 &trace_bprint_event,
1381 &trace_raw_data_event,
1385 __init static int init_events(void)
1387 struct trace_event *event;
1390 for (i = 0; events[i]; i++) {
1393 ret = register_trace_event(event);
1395 printk(KERN_WARNING "event %d failed to register\n",
1403 early_initcall(init_events);