static struct trace_event trace_blk_event = {
.type = TRACE_BLK,
.trace = blk_trace_event_print,
- .latency_trace = blk_trace_event_print,
.binary = blk_trace_event_print_binary,
};
unsigned long flags;
unsigned int commit;
unsigned int read;
+ u64 save_timestamp;
int ret = -1;
if (len < size)
goto out;
+ /* save the current timestamp, since the user will need it */
+ save_timestamp = cpu_buffer->read_stamp;
+
/* Need to copy one event at a time */
do {
memcpy(bpage->data + pos, rpage->data + rpos, size);
/* update bpage */
local_set(&bpage->commit, pos);
- bpage->time_stamp = rpage->time_stamp;
+ bpage->time_stamp = save_timestamp;
/* we copied everything to the beginning */
read = 0;
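The stamp has to be captured before the copy loop: events store delta timestamps against the page header's time_stamp, and cpu_buffer->read_stamp holds the absolute time of the next unread event. When the reader has already consumed part of the page, rpage->time_stamp (the old right-hand side) still names the first event of the whole page, not the first event being copied, so the deltas in the partial copy would be applied to the wrong base. An illustrative user-space model of the delta reconstruction (hypothetical names, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long read_stamp = 1050;	/* next unread event */
		unsigned long long deltas[] = { 0, 7, 3 }; /* copied events */
		unsigned long long ts = read_stamp;	/* == save_timestamp */

		/* deltas only reconstruct correctly against the first
		 * copied event's absolute stamp, sampled before the loop */
		for (int i = 0; i < 3; i++) {
			ts += deltas[i];
			printf("event %d at %llu\n", i, ts);
		}
		return 0;
	}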
"sym-userobj",
"printk-msg-only",
"context-info",
+ "latency-format",
NULL
};
int len;
int ret;
+ if (!cnt)
+ return 0;
+
if (s->len <= s->readpos)
return -EBUSY;
if (cnt > len)
cnt = len;
ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
- if (ret)
+ if (ret == cnt)
return -EFAULT;
- s->readpos += len;
+ cnt -= ret;
+
+ s->readpos += cnt;
return cnt;
}
if (!ret)
return -EFAULT;
- s->readpos += len;
+ s->readpos += cnt;
return cnt;
}
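Both fixes above lean on copy_to_user()'s contract: it returns the number of bytes it could not copy, zero on full success. Bailing out with -EFAULT on any non-zero return threw away partially copied data; the read(2) convention is to fail only when nothing crossed, and otherwise report how much did. The seq read position likewise now advances by what was handed to the user (cnt), not by everything that was available (len). The same correction is applied to tracing_buffers_read() further below. A minimal sketch of the pattern, helper name illustrative:

	#include <linux/uaccess.h>	/* copy_to_user() */

	static ssize_t copy_partial(char __user *ubuf, const char *src,
				    size_t len)
	{
		unsigned long not_copied = copy_to_user(ubuf, src, len);

		if (not_copied == len)		/* nothing made it across */
			return -EFAULT;

		return len - not_copied;	/* bytes actually copied */
	}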
trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}
-static enum print_line_t print_lat_fmt(struct trace_iterator *iter)
-{
- struct trace_seq *s = &iter->seq;
- unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
- struct trace_event *event;
- struct trace_entry *entry = iter->ent;
-
- test_cpu_buff_start(iter);
-
- event = ftrace_find_event(entry->type);
-
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
- if (!trace_print_lat_context(iter))
- goto partial;
- }
-
- if (event)
- return event->latency_trace(iter, sym_flags);
-
- if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
- goto partial;
-
- return TRACE_TYPE_HANDLED;
-partial:
- return TRACE_TYPE_PARTIAL_LINE;
-}
-
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
event = ftrace_find_event(entry->type);
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
- if (!trace_print_context(iter))
- goto partial;
+ if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+ if (!trace_print_lat_context(iter))
+ goto partial;
+ } else {
+ if (!trace_print_context(iter))
+ goto partial;
+ }
}
if (event)
if (trace_flags & TRACE_ITER_RAW)
return print_raw_fmt(iter);
- if (iter->iter_flags & TRACE_FILE_LAT_FMT)
- return print_lat_fmt(iter);
-
return print_trace_fmt(iter);
}
iter = __tracing_open(inode, file);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
-
- return ret;
-}
-
-static int tracing_lt_open(struct inode *inode, struct file *file)
-{
- struct trace_iterator *iter;
- int ret = 0;
-
- iter = __tracing_open(inode, file);
-
- if (IS_ERR(iter))
- ret = PTR_ERR(iter);
- else
+ else if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
return ret;
}
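With the dedicated tracing_lt_open() path gone, tracing_open() inherits the layout from the global option at open time, so one trace file serves both formats. Assuming the standard trace_options name parsing and the usual debugfs mount, the switch becomes echo latency-format > /sys/kernel/debug/tracing/trace_options, with the no prefix (nolatency-format) clearing it again.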
-
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
.release = tracing_release,
};
-static struct file_operations tracing_lt_fops = {
- .open = tracing_lt_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tracing_release,
-};
-
static struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
ssize_t ret;
size_t size;
+ if (!count)
+ return 0;
+
/* Do we have previous read data to read? */
if (info->read < PAGE_SIZE)
goto read;
size = count;
ret = copy_to_user(ubuf, info->spare + info->read, size);
- if (ret)
+ if (ret == size)
return -EFAULT;
+ size -= ret;
+
*ppos += size;
info->read += size;
printk(KERN_TRACE "Dumping ftrace buffer:\n");
+ /* Simulate the iterator */
iter.tr = &global_trace;
iter.trace = current_trace;
+ iter.cpu_file = TRACE_PIPE_ALL_CPU;
/*
* We need to stop all tracing on all CPUS to read the
TRACE_ITER_USERSTACKTRACE = 0x4000,
TRACE_ITER_SYM_USEROBJ = 0x8000,
TRACE_ITER_PRINTK_MSGONLY = 0x10000,
- TRACE_ITER_CONTEXT_INFO = 0x20000 /* Print pid/cpu/time */
+ TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
+ TRACE_ITER_LATENCY_FMT = 0x40000,
};
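TRACE_ITER_LATENCY_FMT keeps the convention that trace_options[] (the string table extended with "latency-format" above) is positionally tied to these bits: the trace_options write handler matches entry i to flag 1 << i. A quick arithmetic check, assuming "latency-format" sits at index 18, directly after "context-info" at index 17:

	#include <stdio.h>

	int main(void)
	{
		printf("%#x\n", 1 << 17);	/* context-info:   0x20000 */
		printf("%#x\n", 1 << 18);	/* latency-format: 0x40000 */
		return 0;
	}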
static struct trace_event trace_branch_event = {
.type = TRACE_BRANCH,
.trace = trace_branch_print,
- .latency_trace = trace_branch_print,
};
static struct tracer branch_trace __read_mostly =
static int trace_type __read_mostly;
+static int save_lat_flag;
+
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
static void __irqsoff_tracer_init(struct trace_array *tr)
{
+ save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+ trace_flags |= TRACE_ITER_LATENCY_FMT;
+
tracing_max_latency = 0;
irqsoff_trace = tr;
/* make sure that the tracer is visible */
static void irqsoff_tracer_reset(struct trace_array *tr)
{
stop_irqsoff_tracer(tr);
+
+ if (!save_lat_flag)
+ trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}
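The save_lat_flag dance lets a latency tracer force the latency layout while it runs without clobbering the user's own setting: the bit's prior state is recorded at init, and reset clears it only if the user had not set it independently. The wakeup tracer below uses the identical idiom. Distilled into a sketch (names and flag storage illustrative):

	#define TRACE_ITER_LATENCY_FMT	0x40000		/* as in the enum */

	static unsigned long trace_flags;		/* stand-in global */
	static int save_lat_flag;

	static void tracer_init_flags(void)
	{
		save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
		trace_flags |= TRACE_ITER_LATENCY_FMT;	/* force it on */
	}

	static void tracer_reset_flags(void)
	{
		if (!save_lat_flag)			/* user never set it */
			trace_flags &= ~TRACE_ITER_LATENCY_FMT;
	}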
static void irqsoff_tracer_start(struct trace_array *tr)
if (event->trace == NULL)
event->trace = trace_nop_print;
- if (event->latency_trace == NULL)
- event->latency_trace = trace_nop_print;
if (event->raw == NULL)
event->raw = trace_nop_print;
if (event->hex == NULL)
}
/* TRACE_FN */
-static enum print_line_t trace_fn_latency(struct trace_iterator *iter,
- int flags)
-{
- struct ftrace_entry *field;
- struct trace_seq *s = &iter->seq;
-
- trace_assign_type(field, iter->ent);
-
- if (!seq_print_ip_sym(s, field->ip, flags))
- goto partial;
- if (!trace_seq_puts(s, " ("))
- goto partial;
- if (!seq_print_ip_sym(s, field->parent_ip, flags))
- goto partial;
- if (!trace_seq_puts(s, ")\n"))
- goto partial;
-
- return TRACE_TYPE_HANDLED;
-
- partial:
- return TRACE_TYPE_PARTIAL_LINE;
-}
-
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
struct ftrace_entry *field;
static struct trace_event trace_fn_event = {
.type = TRACE_FN,
.trace = trace_fn_trace,
- .latency_trace = trace_fn_latency,
.raw = trace_fn_raw,
.hex = trace_fn_hex,
.binary = trace_fn_bin,
static struct trace_event trace_ctx_event = {
.type = TRACE_CTX,
.trace = trace_ctx_print,
- .latency_trace = trace_ctx_print,
.raw = trace_ctx_raw,
.hex = trace_ctx_hex,
.binary = trace_ctxwake_bin,
static struct trace_event trace_wake_event = {
.type = TRACE_WAKE,
.trace = trace_wake_print,
- .latency_trace = trace_wake_print,
.raw = trace_wake_raw,
.hex = trace_wake_hex,
.binary = trace_ctxwake_bin,
static struct trace_event trace_special_event = {
.type = TRACE_SPECIAL,
.trace = trace_special_print,
- .latency_trace = trace_special_print,
.raw = trace_special_print,
.hex = trace_special_hex,
.binary = trace_special_bin,
static struct trace_event trace_stack_event = {
.type = TRACE_STACK,
.trace = trace_stack_print,
- .latency_trace = trace_stack_print,
.raw = trace_special_print,
.hex = trace_special_hex,
.binary = trace_special_bin,
static struct trace_event trace_user_stack_event = {
.type = TRACE_USER_STACK,
.trace = trace_user_stack_print,
- .latency_trace = trace_user_stack_print,
.raw = trace_special_print,
.hex = trace_special_hex,
.binary = trace_special_bin,
static struct trace_event trace_print_event = {
.type = TRACE_PRINT,
.trace = trace_print_print,
- .latency_trace = trace_print_print,
.raw = trace_print_raw,
};
struct hlist_node node;
int type;
trace_print_func trace;
- trace_print_func latency_trace;
trace_print_func raw;
trace_print_func hex;
trace_print_func binary;
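After the member removal, an event supplies a single .trace callback that serves both output styles and may leave the other handlers unset; register_ftrace_event() (see the defaulting hunk above) backfills anything missing with trace_nop_print. A hedged sketch of a minimal post-patch event, where TRACE_MYEV and my_ev_print are hypothetical:

	static enum print_line_t my_ev_print(struct trace_iterator *iter,
					     int flags)
	{
		if (!trace_seq_printf(&iter->seq, "my event\n"))
			return TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_HANDLED;
	}

	static struct trace_event my_event = {
		.type	= TRACE_MYEV,	/* hypothetical type id */
		.trace	= my_ev_print,	/* covers latency output too */
	};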
static void __wakeup_reset(struct trace_array *tr);
+static int save_lat_flag;
+
#ifdef CONFIG_FUNCTION_TRACER
/*
* irqsoff uses its own tracer function to keep the overhead down:
static int __wakeup_tracer_init(struct trace_array *tr)
{
+ save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+ trace_flags |= TRACE_ITER_LATENCY_FMT;
+
tracing_max_latency = 0;
wakeup_trace = tr;
start_wakeup_tracer(tr);
stop_wakeup_tracer(tr);
/* make sure we put back any tasks we are tracing */
wakeup_reset(tr);
+
+ if (!save_lat_flag)
+ trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}
static void wakeup_tracer_start(struct trace_array *tr)