From: Ingo Molnar
Date: Wed, 12 Nov 2008 09:11:37 +0000 (+0100)
Subject: Merge branches 'tracing/ftrace' and 'tracing/urgent' into tracing/core
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=d06bbd669539215405874d8fe32ab65105e6c4bb;p=linux-beck.git

Merge branches 'tracing/ftrace' and 'tracing/urgent' into tracing/core

Conflicts:
	kernel/trace/ring_buffer.c
---

d06bbd669539215405874d8fe32ab65105e6c4bb
diff --cc kernel/trace/ring_buffer.c
index a6b8f9d7ac96,8c5cacb25a18,b08ee9f00c8d..c04c433fbc59
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@@@ -18,6 -18,6 -18,33 +18,35 @@@@
   #include "trace.h"
   
++ /* Global flag to disable all recording to ring buffers */
++ static int ring_buffers_off __read_mostly;
++ 
++ /**
++  * tracing_on - enable all tracing buffers
++  *
++  * This function enables all tracing buffers that may have been
++  * disabled with tracing_off.
++  */
++ void tracing_on(void)
++ {
++ 	ring_buffers_off = 0;
++ }
++ 
++ /**
++  * tracing_off - turn off all tracing buffers
++  *
++  * This function stops all tracing buffers from recording data.
++  * It does not disable any overhead the tracers themselves may
++  * be causing. This function simply causes all recording to
++  * the ring buffers to fail.
++  */
++ void tracing_off(void)
++ {
++ 	ring_buffers_off = 1;
++ }
++ 
+++#include "trace.h"
+++
   /* Up this if you want to test the TIME_EXTENTS and normalization */
   #define DEBUG_SHIFT 0
@@@@ -154,7 -154,8 -181,7 +183,8 @@@@ static inline int test_time_stamp(u64 d
  struct ring_buffer_per_cpu {
  	int cpu;
  	struct ring_buffer *buffer;
- 	spinlock_t lock;
+ +	spinlock_t reader_lock; /* serialize readers */
+ 	raw_spinlock_t lock;
  	struct lock_class_key lock_key;
  	struct list_head pages;
  	struct buffer_page *head_page;	/* read from head */
@@@@ -188,62 -189,16 -215,32 +218,16 @@@@ struct ring_buffer_iter
  	u64 read_stamp;
  };
  
 +/* buffer may be either ring_buffer or ring_buffer_per_cpu */
  #define RB_WARN_ON(buffer, cond) \
- 	do { \
- 		if (unlikely(cond)) { \
- 			atomic_inc(&buffer->record_disabled); \
- 			WARN_ON(1); \
- 		} \
- 	} while (0)
- -#define RB_WARN_ON_RET(buffer, cond) \
- -	do { \
- -		if (unlikely(cond)) { \
+ +	({ \
+ +		int _____ret = unlikely(cond); \
+ +		if (_____ret) { \
  			atomic_inc(&buffer->record_disabled); \
  			WARN_ON(1); \
- 			return -1; \
  		} \
- 	} while (0)
- 
- #define RB_WARN_ON_RET(buffer, cond) \
- 	do { \
- 		if (unlikely(cond)) { \
- 			atomic_inc(&buffer->record_disabled); \
- 			WARN_ON(1); \
- 			return; \
- 		} \
- 	} while (0)
- 
- #define RB_WARN_ON_RET_INT(buffer, cond) \
- 	do { \
- 		if (unlikely(cond)) { \
- 			atomic_inc(&buffer->record_disabled); \
- 			WARN_ON(1); \
- 			return -1; \
- 		} \
- 	} while (0)
- 
- #define RB_WARN_ON_RET_NULL(buffer, cond) \
- 	do { \
- 		if (unlikely(cond)) { \
- 			atomic_inc(&buffer->record_disabled); \
- 			WARN_ON(1); \
- 			return NULL; \
- 		} \
- -	} while (0)
- 
- 
- -#define RB_WARN_ON_ONCE(buffer, cond) \
- -	do { \
- -		static int once; \
- -		if (unlikely(cond) && !once) { \
- -			once++; \
- 			atomic_inc(&buffer->record_disabled); \
- 			WARN_ON(1); \
- 		} \
- 	} while (0)
- 
- /* buffer must be ring_buffer not per_cpu */
- #define RB_WARN_ON_UNLOCK(buffer, cond) \
- 	do { \
- 		if (unlikely(cond)) { \
- 			mutex_unlock(&buffer->mutex); \
- -			atomic_inc(&buffer->record_disabled); \
- -			WARN_ON(1); \
- 			return -1; \
- -		} \
- -	} while (0)
+ +	_____ret; \
+ +	})
  
  /**
   * check_pages - integrity check of buffer pages
@@@@ -257,14 -212,18 -254,14 +241,18 @@@@ static int rb_check_pages(struct ring_b
  	struct list_head *head = &cpu_buffer->pages;
  	struct buffer_page *page, *tmp;
  
- 	RB_WARN_ON_RET_INT(cpu_buffer, head->next->prev != head);
- 	RB_WARN_ON_RET_INT(cpu_buffer, head->prev->next != head);
- 	RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
- 	RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
+ +	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
+ +		return -1;
+ +	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
+ +		return -1;
  
  	list_for_each_entry_safe(page, tmp, head, list) {
- 		RB_WARN_ON_RET_INT(cpu_buffer,
- 		RB_WARN_ON_RET(cpu_buffer,
- -			page->list.next->prev != &page->list);
- 		RB_WARN_ON_RET_INT(cpu_buffer,
- 		RB_WARN_ON_RET(cpu_buffer,
- -			page->list.prev->next != &page->list);
+ +		if (RB_WARN_ON(cpu_buffer,
+ +			page->list.next->prev != &page->list))
+ +			return -1;
+ +		if (RB_WARN_ON(cpu_buffer,
+ +			page->list.prev->next != &page->list))
+ +			return -1;
  	}
  
  	return 0;
@@@@ -321,7 -280,8 -318,7 +309,8 @@@@ rb_allocate_cpu_buffer(struct ring_buff
  	cpu_buffer->cpu = cpu;
  	cpu_buffer->buffer = buffer;
- 	spin_lock_init(&cpu_buffer->lock);
+ +	spin_lock_init(&cpu_buffer->reader_lock);
+ 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  	INIT_LIST_HEAD(&cpu_buffer->pages);
  
  	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
@@@@ -470,13 -430,15 -467,13 +459,15 @@@@ rb_remove_pages(struct ring_buffer_per_
  	synchronize_sched();
  
  	for (i = 0; i < nr_pages; i++) {
- 		RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
- 		BUG_ON(list_empty(&cpu_buffer->pages));
+ +		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+ +			return;
  		p = cpu_buffer->pages.next;
  		page = list_entry(p, struct buffer_page, list);
  		list_del_init(&page->list);
  		free_buffer_page(page);
  	}
- 	RB_WARN_ON_RET(cpu_buffer, list_empty(&cpu_buffer->pages));
- 	BUG_ON(list_empty(&cpu_buffer->pages));
+ +	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+ +		return;
  
  	rb_reset_cpu(cpu_buffer);
@@@@ -498,7 -460,8 -495,7 +489,8 @@@@ rb_insert_pages(struct ring_buffer_per_
  	synchronize_sched();
  
  	for (i = 0; i < nr_pages; i++) {
- 		RB_WARN_ON_RET(cpu_buffer, list_empty(pages));
- 		BUG_ON(list_empty(pages));
+ +		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
+ +			return;
  		p = pages->next;
  		page = list_entry(p, struct buffer_page, list);
  		list_del_init(&page->list);
@@@@ -553,7 -516,10 -550,7 +545,10 @@@@ int ring_buffer_resize(struct ring_buff
  	if (size < buffer_size) {
  		/* easy case, just free pages */
- 		RB_WARN_ON_UNLOCK(buffer, nr_pages >= buffer->pages);
- 		BUG_ON(nr_pages >= buffer->pages);
+ +		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
+ +			mutex_unlock(&buffer->mutex);
+ +			return -1;
+ +		}
  
  		rm_pages = buffer->pages - nr_pages;
@@@@ -572,8 -538,11 -569,7 +567,11 @@@@
  	 * add these pages to the cpu_buffers.  Otherwise we just free
  	 * them all and return -ENOMEM;
  	 */
- 	RB_WARN_ON_UNLOCK(buffer, nr_pages <= buffer->pages);
- 	BUG_ON(nr_pages <= buffer->pages);
+ +	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
+ +		mutex_unlock(&buffer->mutex);
+ +		return -1;
+ +	}
+ 
  	new_pages = nr_pages - buffer->pages;
  
  	for_each_buffer_cpu(buffer, cpu) {
@@@@ -596,7 -565,10 -592,7 +594,10 @@@@
  		rb_insert_pages(cpu_buffer, &pages, new_pages);
  	}
  
- 	RB_WARN_ON_UNLOCK(buffer, !list_empty(&pages));
- 	BUG_ON(!list_empty(&pages));
+ +	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
+ +		mutex_unlock(&buffer->mutex);
+ +		return -1;
+ +	}
  
   out:
  	buffer->pages = nr_pages;
@@@@ -684,7 -656,8 -680,7 +685,8 @@@@ static void rb_update_overflow(struct r
  	     head += rb_event_length(event)) {
  
  		event = __rb_page_index(cpu_buffer->head_page, head);
- 		RB_WARN_ON_RET(cpu_buffer, rb_null_event(event));
- 		BUG_ON(rb_null_event(event));
+ +		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
+ +			return;
  		/* Only count data entries */
  		if (event->type != RINGBUF_TYPE_DATA)
  			continue;
@@@@ -971,7 -946,8 -965,7 +975,8 @@@@ __rb_reserve_next(struct ring_buffer_pe
  	/* We reserved something on the buffer */
  
- 	RB_WARN_ON_RET_NULL(cpu_buffer, write > BUF_PAGE_SIZE);
- 	BUG_ON(write > BUF_PAGE_SIZE);
+ +	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
+ +		return NULL;
  
  	event = __rb_page_index(tail_page, tail);
  	rb_update_event(event, type, length);
@@@@ -1652,7 -1631,8 -1660,7 +1666,8 @@@@ static void rb_advance_reader(struct ri
  	reader = rb_get_reader_page(cpu_buffer);
  
  	/* This function should not be called when buffer is empty */
- 	RB_WARN_ON_RET(cpu_buffer, !reader);
- 	BUG_ON(!reader);
+ +	if (RB_WARN_ON(cpu_buffer, !reader))
+ +		return;
  
  	event = rb_reader_event(cpu_buffer);
@@@@ -1679,8 -1659,9 -1687,7 +1694,9 @@@@ static void rb_advance_iter(struct ring
  	 * Check if we are at the end of the buffer.
  	 */
  	if (iter->head >= rb_page_size(iter->head_page)) {
- 		RB_WARN_ON_RET(buffer,
- 			iter->head_page == cpu_buffer->commit_page);
- 		BUG_ON(iter->head_page == cpu_buffer->commit_page);
+ +		if (RB_WARN_ON(buffer,
+ +			iter->head_page == cpu_buffer->commit_page))
+ +			return;
  		rb_inc_iter(iter);
  		return;
  	}
@@@@ -1693,9 -1674,10 -1700,8 +1709,10 @@@@
  	 * This should not be called to advance the header if we are
  	 * at the tail of the buffer.
  	 */
- 	RB_WARN_ON_RET(cpu_buffer,
- 	BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
- 		(iter->head + length > rb_commit_index(cpu_buffer)));
+ +	if (RB_WARN_ON(cpu_buffer,
+ 		(iter->head_page == cpu_buffer->commit_page) &&
- 		(iter->head + length > rb_commit_index(cpu_buffer)));
+ +		(iter->head + length > rb_commit_index(cpu_buffer))))
+ +		return;
  
  	rb_update_iter_read_stamp(iter, event);
@@@@ -1909,11 -1920,11 -1915,9 +1955,11 @@@@ ring_buffer_read_start(struct ring_buff
  	atomic_inc(&cpu_buffer->record_disabled);
  	synchronize_sched();
  
- 	local_irq_save(flags);
- 	spin_lock_irqsave(&cpu_buffer->lock, flags);
+ +	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ 	__raw_spin_lock(&cpu_buffer->lock);
  	ring_buffer_iter_reset(iter);
- 	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+ 	__raw_spin_unlock(&cpu_buffer->lock);
- 	local_irq_restore(flags);
+ +	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  
  	return iter;
  }
@@@@ -1999,13 -2015,15 -2003,11 +2050,15 @@@@ void ring_buffer_reset_cpu(struct ring_
  	if (!cpu_isset(cpu, buffer->cpumask))
  		return;
  
- 	local_irq_save(flags);
- 	spin_lock_irqsave(&cpu_buffer->lock, flags);
+ +	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ +
+ 	__raw_spin_lock(&cpu_buffer->lock);
  	rb_reset_cpu(cpu_buffer);
- 	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+ 	__raw_spin_unlock(&cpu_buffer->lock);
- 	local_irq_restore(flags);
+ +
+ +	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  }
  
  /**
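
Editor's aside (not part of the commit above): the two patterns this merge resolves toward are a statement-expression warn macro that lets each call site choose its own bail-out value, and a single global flag (ring_buffers_off) that gates all recording. The following is a minimal, self-contained C sketch of those patterns for illustration only; it is not the kernel implementation, and names such as demo_buffer, DEMO_WARN_ON and demo_write are made up for the example. Like the kernel's RB_WARN_ON(), it assumes the GCC statement-expression extension.

/*
 * Illustrative sketch only -- not kernel code.
 * demo_buffer, DEMO_WARN_ON and demo_write are hypothetical names.
 * Requires GCC/Clang statement expressions, as the macro above does.
 */
#include <stdio.h>

struct demo_buffer {
	int record_disabled;	/* stand-in for the kernel's atomic counter */
	int size;
};

/*
 * Like the reworked RB_WARN_ON(): warn, disable further recording, and
 * evaluate to the condition so the caller picks its own return value.
 */
#define DEMO_WARN_ON(buffer, cond)					\
	({								\
		int _____ret = !!(cond);				\
		if (_____ret) {						\
			(buffer)->record_disabled++;			\
			fprintf(stderr, "demo warning: %s\n", #cond);	\
		}							\
		_____ret;						\
	})

/* Like ring_buffers_off: one global flag gates all recording. */
static int demo_buffers_off;

static void demo_tracing_on(void)  { demo_buffers_off = 0; }
static void demo_tracing_off(void) { demo_buffers_off = 1; }

static int demo_write(struct demo_buffer *buf, int len)
{
	if (demo_buffers_off)
		return -1;		/* recording globally disabled */
	if (DEMO_WARN_ON(buf, len > buf->size))
		return -1;		/* warn, then bail out */
	return 0;
}

int main(void)
{
	struct demo_buffer buf = { .record_disabled = 0, .size = 16 };

	printf("%d\n", demo_write(&buf, 8));	/* 0: fits */
	demo_tracing_off();
	printf("%d\n", demo_write(&buf, 8));	/* -1: buffers off */
	demo_tracing_on();
	printf("%d\n", demo_write(&buf, 32));	/* -1: warned and refused */
	return 0;
}

Because the macro evaluates to the tested condition, each caller decides how to fail (-1, NULL, or a plain return), which is what allowed the RB_WARN_ON_RET/RET_INT/RET_NULL/UNLOCK variants removed in this merge to collapse into a single macro.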