ring-buffer: remove unneeded conditional in rb_reserve_next
author    Steven Rostedt <srostedt@redhat.com>
          Wed, 6 May 2009 14:26:45 +0000 (10:26 -0400)
committer Steven Rostedt <rostedt@goodmis.org>
          Wed, 6 May 2009 16:49:19 +0000 (12:49 -0400)
The code in __rb_reserve_next checks, on page overflow, whether it is the
original committer and, if so, resets the page back to the original
setting.  Although this is fine, and the code is correct, it is
a bit fragile.  Some experimental work I did breaks it easily.

The better and more robust solution is to have all committers that
overflow the page simply subtract what they added.
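
(For illustration only, not part of the commit: a minimal userspace
sketch of the before/after reset logic, using C11 atomics in place of
the kernel's local_t.  BUF_PAGE_SIZE, struct page, and reserve() are
hypothetical names for this sketch, not the kernel's definitions.)

    #include <stdatomic.h>
    #include <stdio.h>

    #define BUF_PAGE_SIZE 4096

    struct page { atomic_ulong write; };

    /* Try to reserve 'length' bytes; return the event offset, or -1. */
    static long reserve(struct page *p, unsigned long length)
    {
            unsigned long write = atomic_fetch_add(&p->write, length) + length;
            unsigned long tail  = write - length;  /* offset before this event */

            if (write <= BUF_PAGE_SIZE)
                    return (long)tail;             /* the event fits */

            /*
             * Overflow.  The old, fragile reset ran only for the writer
             * that crossed the page boundary:
             *
             *         if (tail <= BUF_PAGE_SIZE)
             *                 local_set(&p->write, tail);
             *
             * The robust form lets every overflowing writer subtract
             * exactly what it added, so the resets compose:
             */
            atomic_fetch_sub(&p->write, length);
            return -1;
    }

    int main(void)
    {
            struct page p = { .write = 4090 };

            /* 10 bytes do not fit in the remaining 6; reserve() fails
             * and the write counter is restored to 4090. */
            printf("%ld %lu\n", reserve(&p, 10),
                   (unsigned long)atomic_load(&p.write));
            return 0;
    }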

[ Impact: more robust ring buffer accounting ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kernel/trace/ring_buffer.c

index 424129eb20a454113c7578d5f5a8af7fec5c9b5b..03ed52b67db308c7f05c7e8354551f63a97c8765 100644
@@ -1290,9 +1290,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                rb_event_set_padding(event);
        }
 
-       if (tail <= BUF_PAGE_SIZE)
-               /* Set the write back to the previous setting */
-               local_set(&tail_page->write, tail);
+       /* Set the write back to the previous setting */
+       local_sub(length, &tail_page->write);
 
        /*
         * If this was a commit entry that failed,
@@ -1311,8 +1310,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
  out_reset:
        /* reset write */
-       if (tail <= BUF_PAGE_SIZE)
-               local_set(&tail_page->write, tail);
+       local_sub(length, &tail_page->write);
 
        if (likely(lock_taken))
                __raw_spin_unlock(&cpu_buffer->lock);