git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
bcache: Better alloc tracepoints
author: Kent Overstreet <kmo@daterainc.com>
Thu, 13 Feb 2014 02:43:32 +0000 (18:43 -0800)
committer: Kent Overstreet <kmo@daterainc.com>
Tue, 18 Mar 2014 19:22:35 +0000 (12:22 -0700)
Change the invalidate tracepoint to indicate how much data we're invalidating,
and change the alloc tracepoints to indicate what offset they're for.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
drivers/md/bcache/alloc.c
drivers/md/bcache/trace.c
include/trace/events/bcache.h

index c0d37d0824439f2daff4c049ab82dc6b3c6e646e..a3e1427945f24c0a21263cc97d9d22e0e2f195b2 100644 (file)
@@ -162,10 +162,15 @@ static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
 
 static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
 {
+       size_t bucket = b - ca->buckets;
+
+       if (GC_SECTORS_USED(b))
+               trace_bcache_invalidate(ca, bucket);
+
        bch_inc_gen(ca, b);
        b->prio = INITIAL_PRIO;
        atomic_inc(&b->pin);
-       fifo_push(&ca->free_inc, b - ca->buckets);
+       fifo_push(&ca->free_inc, bucket);
 }
 
 /*
@@ -301,8 +306,6 @@ static void invalidate_buckets(struct cache *ca)
                invalidate_buckets_random(ca);
                break;
        }
-
-       trace_bcache_alloc_invalidate(ca);
 }
 
 #define allocator_wait(ca, cond)                                       \
@@ -408,8 +411,10 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
            fifo_pop(&ca->free[reserve], r))
                goto out;
 
-       if (!wait)
+       if (!wait) {
+               trace_bcache_alloc_fail(ca, reserve);
                return -1;
+       }
 
        do {
                prepare_to_wait(&ca->set->bucket_wait, &w,
@@ -425,6 +430,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 out:
        wake_up_process(ca->alloc_thread);
 
+       trace_bcache_alloc(ca, reserve);
+
        if (expensive_debug_checks(ca->set)) {
                size_t iter;
                long i;
index adbc3df17a8063933fd395de7763c5e9d560ea67..b7820b0d2621a1d34aa971c84601e884b33fc023 100644 (file)
@@ -45,7 +45,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split);
 EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact);
 EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root);
 
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_invalidate);
 EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail);
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
index 7110897c3dfa595e385b8a946f7547ca05583fec..8fc2a7134d3c9e2a4e50d9df9bfec9621b95bf40 100644 (file)
@@ -399,26 +399,43 @@ TRACE_EVENT(bcache_keyscan,
 
 /* Allocator */
 
-TRACE_EVENT(bcache_alloc_invalidate,
-       TP_PROTO(struct cache *ca),
-       TP_ARGS(ca),
+TRACE_EVENT(bcache_invalidate,
+       TP_PROTO(struct cache *ca, size_t bucket),
+       TP_ARGS(ca, bucket),
 
        TP_STRUCT__entry(
-               __field(unsigned,       free                    )
-               __field(unsigned,       free_inc                )
-               __field(unsigned,       free_inc_size           )
-               __field(unsigned,       unused                  )
+               __field(unsigned,       sectors                 )
+               __field(dev_t,          dev                     )
+               __field(__u64,          offset                  )
        ),
 
        TP_fast_assign(
-               __entry->free           = fifo_used(&ca->free[RESERVE_NONE]);
-               __entry->free_inc       = fifo_used(&ca->free_inc);
-               __entry->free_inc_size  = ca->free_inc.size;
-               __entry->unused         = fifo_used(&ca->unused);
+               __entry->dev            = ca->bdev->bd_dev;
+               __entry->offset         = bucket << ca->set->bucket_bits;
+               __entry->sectors        = GC_SECTORS_USED(&ca->buckets[bucket]);
        ),
 
-       TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
-                 __entry->free_inc, __entry->free_inc_size, __entry->unused)
+       TP_printk("invalidated %u sectors at %d,%d sector=%llu",
+                 __entry->sectors, MAJOR(__entry->dev),
+                 MINOR(__entry->dev), __entry->offset)
+);
+
+TRACE_EVENT(bcache_alloc,
+       TP_PROTO(struct cache *ca, size_t bucket),
+       TP_ARGS(ca, bucket),
+
+       TP_STRUCT__entry(
+               __field(dev_t,          dev                     )
+               __field(__u64,          offset                  )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = ca->bdev->bd_dev;
+               __entry->offset         = bucket << ca->set->bucket_bits;
+       ),
+
+       TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
+                 MINOR(__entry->dev), __entry->offset)
 );
 
 TRACE_EVENT(bcache_alloc_fail,
@@ -426,6 +443,7 @@ TRACE_EVENT(bcache_alloc_fail,
        TP_ARGS(ca, reserve),
 
        TP_STRUCT__entry(
+               __field(dev_t,          dev                     )
                __field(unsigned,       free                    )
                __field(unsigned,       free_inc                )
                __field(unsigned,       unused                  )
@@ -433,13 +451,15 @@ TRACE_EVENT(bcache_alloc_fail,
        ),
 
        TP_fast_assign(
+               __entry->dev            = ca->bdev->bd_dev;
                __entry->free           = fifo_used(&ca->free[reserve]);
                __entry->free_inc       = fifo_used(&ca->free_inc);
                __entry->unused         = fifo_used(&ca->unused);
                __entry->blocked        = atomic_read(&ca->set->prio_blocked);
        ),
 
-       TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
+       TP_printk("alloc fail %d,%d free %u free_inc %u unused %u blocked %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
                  __entry->free_inc, __entry->unused, __entry->blocked)
 );