Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 162da2305cbcc321d3f939baccb03962f6878b97..04dac263825874d69f104df2f33cbd54433a2e40 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -657,8 +658,8 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
        buffer->reader_lock_key = key;
 
        /* need at least two pages */
-       if (buffer->pages == 1)
-               buffer->pages++;
+       if (buffer->pages < 2)
+               buffer->pages = 2;
 
        /*
         * In case of non-hotplug cpu, if the ring-buffer is allocated
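Why the clamp replaces the increment: __ring_buffer_alloc() derives the page count from the requested size (DIV_ROUND_UP(size, BUF_PAGE_SIZE) earlier in this function), so a size of zero yields zero pages and the old special case for exactly one page never fired. A standalone sketch of the arithmetic; the BUF_PAGE_SIZE value below is an illustrative assumption, not the kernel's exact constant:

    /* Userspace sketch of the page-count math; BUF_PAGE_SIZE here is an
     * illustrative stand-in. */
    #include <stdio.h>

    #define BUF_PAGE_SIZE 4080UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long sizes[] = { 0, 100, BUF_PAGE_SIZE, 10 * BUF_PAGE_SIZE };

            for (int i = 0; i < 4; i++) {
                    unsigned long pages = DIV_ROUND_UP(sizes[i], BUF_PAGE_SIZE);

                    if (pages < 2)  /* the new clamp; old code only caught pages == 1 */
                            pages = 2;
                    printf("size=%5lu -> pages=%lu\n", sizes[i], pages);
            }
            return 0;
    }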
@@ -1154,6 +1155,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
        }
 
        event = __rb_page_index(tail_page, tail);
+       kmemcheck_annotate_bitfield(event, bitfield);
 
        /*
         * If this event is bigger than the minimum size, then
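The new annotation marks the event header's bitfield for kmemcheck (hence the new include in the first hunk). type_len and time_delta share one 32-bit word; the writer fills the bitfield in immediately after each annotation site, but kmemcheck tracks initialization at byte granularity and would otherwise report false positives when the word is read back. A sketch of the annotated layout, modelled on include/linux/ring_buffer.h from the same era:

    /* Sketch of the header that kmemcheck_annotate_bitfield(event, bitfield)
     * refers to; the begin/end markers name the "bitfield" region. */
    struct ring_buffer_event {
            kmemcheck_bitfield_begin(bitfield);
            u32             type_len:5, time_delta:27;
            kmemcheck_bitfield_end(bitfield);

            u32             array[];
    };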
@@ -1324,6 +1326,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        /* We reserved something on the buffer */
 
        event = __rb_page_index(tail_page, tail);
+       kmemcheck_annotate_bitfield(event, bitfield);
        rb_update_event(event, type, length);
 
        /* The passed in type is zero for DATA */
@@ -2466,6 +2469,21 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
+static inline int rb_ok_to_lock(void)
+{
+       /*
+        * If an NMI die dump is reading out the content of the ring
+        * buffer, do not grab locks; instead, permanently disable the
+        * ring buffer. A one-time deal is all you get from reading
+        * the ring buffer from within an NMI.
+        */
+       if (likely(!in_nmi() && !oops_in_progress))
+               return 1;
+
+       tracing_off_permanent();
+       return 0;
+}
+
 /**
  * ring_buffer_peek - peek at the next event to be read
  * @buffer: The ring buffer to read
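The helper gates the reader_lock for every read-side entry point changed below. Condensed, each caller now follows the same shape (names taken from the hunks that follow):

    unsigned long flags;
    int dolock = rb_ok_to_lock();   /* 0 when called from NMI/oops */

    local_irq_save(flags);          /* interrupts go off either way */
    if (dolock)
            spin_lock(&cpu_buffer->reader_lock);

    /* ... peek at / consume / test the buffer ... */

    if (dolock)
            spin_unlock(&cpu_buffer->reader_lock);
    local_irq_restore(flags);

Skipping the lock avoids deadlocking on a reader_lock the dying CPU may already hold, and tracing_off_permanent() disables further writes so the unlocked dump is a one-shot but safe read.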
@@ -2481,14 +2499,20 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
        unsigned long flags;
+       int dolock;
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return NULL;
 
+       dolock = rb_ok_to_lock();
  again:
-       spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+       local_irq_save(flags);
+       if (dolock)
+               spin_lock(&cpu_buffer->reader_lock);
        event = rb_buffer_peek(buffer, cpu, ts);
-       spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+       if (dolock)
+               spin_unlock(&cpu_buffer->reader_lock);
+       local_irq_restore(flags);
 
        if (event && event->type_len == RINGBUF_TYPE_PADDING) {
                cpu_relax();
@@ -2540,6 +2564,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event = NULL;
        unsigned long flags;
+       int dolock;
+
+       dolock = rb_ok_to_lock();
 
  again:
        /* might be called in atomic */
@@ -2549,7 +2576,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
                goto out;
 
        cpu_buffer = buffer->buffers[cpu];
-       spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+       local_irq_save(flags);
+       if (dolock)
+               spin_lock(&cpu_buffer->reader_lock);
 
        event = rb_buffer_peek(buffer, cpu, ts);
        if (!event)
@@ -2558,7 +2587,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
        rb_advance_reader(cpu_buffer);
 
  out_unlock:
-       spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+       if (dolock)
+               spin_unlock(&cpu_buffer->reader_lock);
+       local_irq_restore(flags);
 
  out:
        preempt_enable();
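For context, a hypothetical consumer loop built on this entry point; buffer and cpu are assumed to be supplied by the caller, and ring_buffer_event_data() is the accessor for the event payload:

    struct ring_buffer_event *event;
    u64 ts;

    /* Drain everything currently queued on one CPU's buffer. */
    while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
            void *data = ring_buffer_event_data(event);

            /* process data at timestamp ts */
    }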
@@ -2756,12 +2787,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 int ring_buffer_empty(struct ring_buffer *buffer)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
+       unsigned long flags;
+       int dolock;
        int cpu;
+       int ret;
+
+       dolock = rb_ok_to_lock();
 
        /* yes this is racy, but if you don't like the race, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
-               if (!rb_per_cpu_empty(cpu_buffer))
+               local_irq_save(flags);
+               if (dolock)
+                       spin_lock(&cpu_buffer->reader_lock);
+               ret = rb_per_cpu_empty(cpu_buffer);
+               if (dolock)
+                       spin_unlock(&cpu_buffer->reader_lock);
+               local_irq_restore(flags);
+
+               if (!ret)
                        return 0;
        }
 
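Taking the reader_lock here lets rb_per_cpu_empty() see a consistent reader page against a concurrent reader-page swap; the result can still go stale the moment the lock drops, which is what the "racy" comment concedes. A hypothetical caller for which that is acceptable:

    /* Busy-wait until some CPU has produced an event (sketch only;
     * a real caller would sleep or poll with a timeout). */
    while (ring_buffer_empty(buffer))
            cpu_relax();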
@@ -2777,14 +2821,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
+       unsigned long flags;
+       int dolock;
        int ret;
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return 1;
 
+       dolock = rb_ok_to_lock();
+
        cpu_buffer = buffer->buffers[cpu];
+       local_irq_save(flags);
+       if (dolock)
+               spin_lock(&cpu_buffer->reader_lock);
        ret = rb_per_cpu_empty(cpu_buffer);
-
+       if (dolock)
+               spin_unlock(&cpu_buffer->reader_lock);
+       local_irq_restore(flags);
 
        return ret;
 }