Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
kernel/trace/ring_buffer.c
index a1ca4956ab5ec3673b04ddd839dd9edec26d1bf4..2326b04c95c493fec48418746c0194980ad4b4fa 100644
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
        int                             cpu;
        struct ring_buffer              *buffer;
        spinlock_t                      reader_lock;    /* serialize readers */
-       raw_spinlock_t                  lock;
+       arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
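
This hunk renames the per-CPU buffer's low-level lock from raw_spinlock_t to arch_spinlock_t. That type is the architecture's bare spinlock: no lockdep tracking, no preemption hooks, just raw acquire and release, which is why the ring buffer can take it from contexts where an instrumented lock would recurse back into the tracer. A minimal userspace sketch of such a lock using C11 atomics (the my_arch_* names are illustrative, not kernel API):

    #include <stdatomic.h>

    typedef struct { atomic_flag slock; } my_arch_spinlock_t;

    #define MY_ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_FLAG_INIT }

    static void my_arch_spin_lock(my_arch_spinlock_t *lock)
    {
            /* Spin until we win the flag; no sleeping, no debug hooks. */
            while (atomic_flag_test_and_set_explicit(&lock->slock,
                                                     memory_order_acquire))
                    ; /* busy-wait */
    }

    static void my_arch_spin_unlock(my_arch_spinlock_t *lock)
    {
            atomic_flag_clear_explicit(&lock->slock, memory_order_release);
    }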
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
        cpu_buffer->buffer = buffer;
        spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-       cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                            GFP_KERNEL, cpu_to_node(cpu));
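
The matching initialization hunk: __RAW_SPIN_LOCK_UNLOCKED becomes __ARCH_SPIN_LOCK_UNLOCKED, assigned through a cast because the field is set up at runtime in rb_allocate_cpu_buffer() rather than statically. The same idiom in the sketch above would be (illustrative only):

    /* Runtime init via compound literal, mirroring the kernel's cast. */
    my_arch_spinlock_t lock;
    lock = (my_arch_spinlock_t)MY_ARCH_SPIN_LOCK_UNLOCKED;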
@@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
        struct list_head *p;
        unsigned i;
 
-       atomic_inc(&cpu_buffer->record_disabled);
-       synchronize_sched();
-
        spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);
 
@@ -1211,12 +1208,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
                return;
 
        rb_reset_cpu(cpu_buffer);
-       spin_unlock_irq(&cpu_buffer->reader_lock);
-
        rb_check_pages(cpu_buffer);
 
-       atomic_dec(&cpu_buffer->record_disabled);
-
+       spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
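
Two things happen in rb_remove_pages(): the atomic_inc(&cpu_buffer->record_disabled)/synchronize_sched() pair is dropped (that responsibility moves up into ring_buffer_resize(), below), and rb_check_pages() now runs before reader_lock is released rather than after, so the integrity check sees the list in exactly the state the modification left it. A rough userspace sketch of the check-before-unlock shape, with a pthread mutex standing in for reader_lock and a toy list and check:

    #include <assert.h>
    #include <pthread.h>
    #include <stddef.h>

    struct bpage { struct bpage *next; };

    static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Unlink nr pages, then validate the list while still holding the
     * lock, so no reader can interleave between change and check. */
    static struct bpage *remove_pages(struct bpage *head, unsigned nr)
    {
            pthread_mutex_lock(&reader_lock);
            while (head && nr--)
                    head = head->next;
            for (struct bpage *p = head; p != NULL; p = p->next)
                    assert(p != p->next);   /* toy integrity check */
            pthread_mutex_unlock(&reader_lock);
            return head;
    }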
@@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
        struct list_head *p;
        unsigned i;
 
-       atomic_inc(&cpu_buffer->record_disabled);
-       synchronize_sched();
-
        spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);
 
@@ -1242,11 +1233,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
                list_add_tail(&bpage->list, cpu_buffer->pages);
        }
        rb_reset_cpu(cpu_buffer);
-       spin_unlock_irq(&cpu_buffer->reader_lock);
-
        rb_check_pages(cpu_buffer);
 
-       atomic_dec(&cpu_buffer->record_disabled);
+       spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
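rb_insert_pages() gets the mirror-image treatment: the writer-disable/synchronize pair is removed and the unlock slides past rb_check_pages() for the same reason as in rb_remove_pages() above. Both resize helpers now assume their caller has already quiesced writers.
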
@@ -1254,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- *  RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
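
The docstring paragraph removed in this hunk said the tracer had to guarantee the buffer was idle across a resize, and even suggested that RCU synchronization might lift the requirement. That note is now stale: the hunks below make ring_buffer_resize() enforce the guarantee itself, using exactly the synchronize_sched() call the comment anticipated.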
@@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        if (size == buffer_size)
                return size;
 
+       atomic_inc(&buffer->record_disabled);
+
+       /* Make sure all writers are done with this buffer. */
+       synchronize_sched();
+
        mutex_lock(&buffer->mutex);
        get_online_cpus();
 
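Here the synchronization the helpers lost is re-established, once per resize instead of once per helper: record_disabled is raised so writers bail out early, then synchronize_sched() waits for any writer already inside a preemption-disabled write path to finish. A hedged C11 analogue of the pattern (the draining loop is a crude stand-in for synchronize_sched(), and all names here are illustrative):

    #include <sched.h>
    #include <stdatomic.h>

    static atomic_int record_disabled;    /* writers test this first   */
    static atomic_int writers_in_flight;  /* writers inside a write    */

    static void quiesce_writers(void)
    {
            atomic_fetch_add(&record_disabled, 1);
            /* Wait out writers that started before the flag went up. */
            while (atomic_load(&writers_in_flight) > 0)
                    sched_yield();        /* ~ synchronize_sched() */
    }

    static void write_event(void)
    {
            atomic_fetch_add(&writers_in_flight, 1);
            if (atomic_load(&record_disabled) == 0) {
                    /* ... reserve and commit an event ... */
            }
            atomic_fetch_sub(&writers_in_flight, 1);
    }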
@@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
 
+       atomic_dec(&buffer->record_disabled);
+
        return size;
 
  free_pages:
@@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        }
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
+       atomic_dec(&buffer->record_disabled);
        return -ENOMEM;
 
        /*
@@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
+       atomic_dec(&buffer->record_disabled);
        return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
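
The three hunks above balance the earlier atomic_inc() on every exit from ring_buffer_resize(): the success return, the free_pages/-ENOMEM path, and out_fail. Each path carries its own atomic_dec(&buffer->record_disabled); any exit that missed the decrement would leave recording disabled permanently. A common alternative shape funnels every path through one label so the decrement appears once (a hedged sketch reusing record_disabled from the analogue above; resize_sketch is illustrative, not the kernel function):

    static int resize_sketch(int fail)
    {
            int ret = 0;

            atomic_fetch_add(&record_disabled, 1);
            if (fail) {
                    ret = -1;
                    goto out;       /* error path still hits the dec */
            }
            /* ... swap pages ... */
    out:
            atomic_fetch_sub(&record_disabled, 1);
            return ret;
    }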
@@ -2834,7 +2827,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        int ret;
 
        local_irq_save(flags);
-       __raw_spin_lock(&cpu_buffer->lock);
+       arch_spin_lock(&cpu_buffer->lock);
 
  again:
        /*
@@ -2923,7 +2916,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        goto again;
 
  out:
-       __raw_spin_unlock(&cpu_buffer->lock);
+       arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
 
        return reader;
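
The same rename at the use sites: __raw_spin_lock() becomes arch_spin_lock(). Note the pairing with local_irq_save(): an arch spinlock does not disable interrupts by itself, so the caller must, or an interrupt arriving on this CPU could try to take the same lock and deadlock. In the userspace sketch, blocking signals plays the role of interrupt masking (hedged analogue, reusing my_arch_spinlock_t from the first sketch; needs -pthread):

    #include <signal.h>

    static void locked_section(my_arch_spinlock_t *lock)
    {
            sigset_t all, old;

            sigfillset(&all);
            pthread_sigmask(SIG_BLOCK, &all, &old);   /* ~ local_irq_save()    */
            my_arch_spin_lock(lock);
            /* ... swap in the reader page ... */
            my_arch_spin_unlock(lock);
            pthread_sigmask(SIG_SETMASK, &old, NULL); /* ~ local_irq_restore() */
    }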
@@ -3286,9 +3279,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
        synchronize_sched();
 
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-       __raw_spin_lock(&cpu_buffer->lock);
+       arch_spin_lock(&cpu_buffer->lock);
        rb_iter_reset(iter);
-       __raw_spin_unlock(&cpu_buffer->lock);
+       arch_spin_unlock(&cpu_buffer->lock);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
        return iter;
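
ring_buffer_read_start() shows the ordering the two locks obey: reader_lock, a normal lockdep-tracked spinlock, is always taken first with interrupts disabled, and the per-CPU arch lock nests inside it. Keeping that order everywhere, here, in rb_get_reader_page(), and in ring_buffer_reset_cpu() below, is what makes the uninstrumented inner lock safe to use without lockdep's help.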
@@ -3408,11 +3401,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
        if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
                goto out;
 
-       __raw_spin_lock(&cpu_buffer->lock);
+       arch_spin_lock(&cpu_buffer->lock);
 
        rb_reset_cpu(cpu_buffer);
 
-       __raw_spin_unlock(&cpu_buffer->lock);
+       arch_spin_unlock(&cpu_buffer->lock);
 
  out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);