return ent;
}
+/*
+ * No locking is necessary here. The worst that can
+ * happen is losing events that are consumed at the same
+ * time by a trace_pipe reader.
+ * Other than that, we don't risk crashing the ring buffer
+ * because it serializes its readers.
+ *
+ * The current tracer is copied to avoid using a global
+ * lock all around.
+ */
static void *s_start(struct seq_file *m, loff_t *pos)
{
struct trace_iterator *iter = m->private;
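+	/* re-copy the tracer only when current_trace has changed since the last read */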
+ static struct tracer *old_tracer;
int cpu_file = iter->cpu_file;
void *p = NULL;
loff_t l = 0;
int cpu;
+ /* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
-
- if (!current_trace || current_trace != iter->trace) {
- mutex_unlock(&trace_types_lock);
- return NULL;
+ if (unlikely(old_tracer != current_trace && current_trace)) {
+ old_tracer = current_trace;
+ *iter->trace = *current_trace;
}
+ mutex_unlock(&trace_types_lock);
atomic_inc(&trace_record_cmdline_disabled);
static void s_stop(struct seq_file *m, void *p)
{
atomic_dec(&trace_record_cmdline_disabled);
- mutex_unlock(&trace_types_lock);
}
static void print_lat_help_header(struct seq_file *m)
goto out;
}
+	/*
+	 * We make a copy of the current tracer to avoid concurrent
+	 * changes to it while we are reading.
+	 */
mutex_lock(&trace_types_lock);
+ iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
+ if (!iter->trace) {
+ *ret = -ENOMEM;
+ goto fail;
+ }
+ if (current_trace)
+ *iter->trace = *current_trace;
+
if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
iter->tr = &global_trace;
- iter->trace = current_trace;
iter->pos = -1;
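+	/* protect this iterator against concurrent consumers */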
+ mutex_init(&iter->mutex);
iter->cpu_file = cpu_file;
/* Notify the tracer early; before we stop tracing. */
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
-fail:
+ fail:
mutex_unlock(&trace_types_lock);
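+	/* iter->trace may still be NULL here; kfree(NULL) is a no-op */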
+ kfree(iter->trace);
kfree(iter);
return ERR_PTR(-ENOMEM);
mutex_unlock(&trace_types_lock);
seq_release(inode, file);
+ mutex_destroy(&iter->mutex);
+ kfree(iter->trace);
kfree(iter);
return 0;
}
goto out;
}
+	/*
+	 * We make a copy of the current tracer to avoid concurrent
+	 * changes to it while we are reading.
+	 */
+	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
+ if (!iter->trace) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ if (current_trace)
+ *iter->trace = *current_trace;
+
if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
- kfree(iter);
ret = -ENOMEM;
- goto out;
+ goto fail;
}
/* trace pipe does not show start of buffer */
iter->cpu_file = cpu_file;
iter->tr = &global_trace;
- iter->trace = current_trace;
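+	/* iter->mutex serializes the consumers of this pipe iterator */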
+ mutex_init(&iter->mutex);
filp->private_data = iter;
if (iter->trace->pipe_open)
out:
mutex_unlock(&trace_types_lock);
return ret;
+
+fail:
+ kfree(iter->trace);
+ kfree(iter);
+ mutex_unlock(&trace_types_lock);
+ return ret;
}
static int tracing_release_pipe(struct inode *inode, struct file *file)
mutex_unlock(&trace_types_lock);
free_cpumask_var(iter->started);
+ mutex_destroy(&iter->mutex);
+ kfree(iter->trace);
kfree(iter);
return 0;
return -EAGAIN;
}
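+	/* drop the iterator lock while we sleep so other consumers are not blocked */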
- mutex_unlock(&trace_types_lock);
+ mutex_unlock(&iter->mutex);
iter->trace->wait_pipe(iter);
- mutex_lock(&trace_types_lock);
+ mutex_lock(&iter->mutex);
if (signal_pending(current))
return -EINTR;
- if (iter->trace != current_trace)
- return 0;
-
/*
* We block until we read something and tracing is disabled.
* We still block if tracing is disabled, but we have never
size_t cnt, loff_t *ppos)
{
struct trace_iterator *iter = filp->private_data;
+ static struct tracer *old_tracer;
ssize_t sret;
/* return any leftover data */
trace_seq_reset(&iter->seq);
+ /* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
+ if (unlikely(old_tracer != current_trace && current_trace)) {
+ old_tracer = current_trace;
+ *iter->trace = *current_trace;
+ }
+ mutex_unlock(&trace_types_lock);
+
+	/*
+	 * Avoid more than one consumer on a single file descriptor.
+	 * This is just a matter of trace coherency; the ring buffer
+	 * itself is protected.
+	 */
+ mutex_lock(&iter->mutex);
if (iter->trace->read) {
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
if (sret)
goto waitagain;
out:
- mutex_unlock(&trace_types_lock);
+ mutex_unlock(&iter->mutex);
return sret;
}
.ops = &tracing_pipe_buf_ops,
.spd_release = tracing_spd_release_pipe,
};
+ static struct tracer *old_tracer;
ssize_t ret;
size_t rem;
unsigned int i;
+ /* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
+ if (unlikely(old_tracer != current_trace && current_trace)) {
+ old_tracer = current_trace;
+ *iter->trace = *current_trace;
+ }
+ mutex_unlock(&trace_types_lock);
+
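+	/* serialize with other consumers of this iterator, as in tracing_read_pipe() */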
+ mutex_lock(&iter->mutex);
if (iter->trace->splice_read) {
ret = iter->trace->splice_read(iter, filp,
trace_seq_reset(&iter->seq);
}
- mutex_unlock(&trace_types_lock);
+ mutex_unlock(&iter->mutex);
spd.nr_pages = i;
return splice_to_pipe(pipe, &spd);
out_err:
- mutex_unlock(&trace_types_lock);
+ mutex_unlock(&iter->mutex);
return ret;
}