rcu: Add tracing for _rcu_barrier()
author Paul E. McKenney <paul.mckenney@linaro.org>
Thu, 24 May 2012 01:47:05 +0000 (18:47 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 2 Jul 2012 19:33:23 +0000 (12:33 -0700)
This commit adds event tracing for _rcu_barrier() execution.  The new
tracepoint is defined only if RCU_TRACE=y.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
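
On a kernel built with RCU_TRACE=y, the new event should show up under the
"rcu" group of the standard trace-event interface.  As a rough sketch
(assuming tracefs/debugfs is mounted at its usual location):

    echo 1 > /sys/kernel/debug/tracing/events/rcu/rcu_barrier/enable
    cat /sys/kernel/debug/tracing/trace

Given the TP_printk() format added below, a hypothetical event payload would
read something like "rcu_sched Begin cpu -1 remaining 0 # 4" (values invented
for illustration).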
include/trace/events/rcu.h
kernel/rcutree.c

index d274734b2aa42fee56d7ce7ab2b7898d39521e7e..5bde94d8585b77c71e957926ed9ca7b5b85bcc5c 100644 (file)
@@ -541,6 +541,50 @@ TRACE_EVENT(rcu_torture_read,
                  __entry->rcutorturename, __entry->rhp)
 );
 
+/*
+ * Tracepoint for _rcu_barrier() execution.  The string "s" describes
+ * the _rcu_barrier phase:
+ *     "Begin": _rcu_barrier() started.
+ *     "Check": _rcu_barrier() checking for piggybacking.
+ *     "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
+ *     "Inc1": _rcu_barrier() piggyback check counter incremented.
+ *     "Offline": _rcu_barrier() found offline CPU.
+ *     "OnlineQ": _rcu_barrier() found online CPU with callbacks.
+ *     "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
+ *     "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
+ *     "CB": An rcu_barrier_callback() invoked a callback, not the last.
+ *     "LastCB": An rcu_barrier_callback() invoked the last callback.
+ *     "Inc2": _rcu_barrier() piggyback check counter incremented.
+ * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
+ * is the count of remaining callbacks, and "done" is the piggybacking count.
+ */
+TRACE_EVENT(rcu_barrier,
+
+       TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
+
+       TP_ARGS(rcuname, s, cpu, cnt, done),
+
+       TP_STRUCT__entry(
+               __field(char *, rcuname)
+               __field(char *, s)
+               __field(int, cpu)
+               __field(int, cnt)
+               __field(unsigned long, done)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->s = s;
+               __entry->cpu = cpu;
+               __entry->cnt = cnt;
+               __entry->done = done;
+       ),
+
+       TP_printk("%s %s cpu %d remaining %d # %lu",
+                 __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
+                 __entry->done)
+);
+
 #else /* #ifdef CONFIG_RCU_TRACE */
 
 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
@@ -564,6 +608,7 @@ TRACE_EVENT(rcu_torture_read,
 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
        do { } while (0)
 #define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
index 6bb5d562253f415154e6f052afa148ee99c36373..dda43d826504a6e5085db45499f14e6dd36cd184 100644 (file)
@@ -2259,6 +2259,17 @@ static int rcu_cpu_has_callbacks(int cpu)
               rcu_preempt_cpu_has_callbacks(cpu);
 }
 
+/*
+ * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
+ * the compiler is expected to optimize this away.
+ */
+static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
+                              int cpu, unsigned long done)
+{
+       trace_rcu_barrier(rsp->name, s, cpu,
+                         atomic_read(&rsp->barrier_cpu_count), done);
+}
+
 /*
  * RCU callback function for _rcu_barrier().  If we are last, wake
  * up the task executing _rcu_barrier().
@@ -2268,8 +2279,12 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
        struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
        struct rcu_state *rsp = rdp->rsp;
 
-       if (atomic_dec_and_test(&rsp->barrier_cpu_count))
+       if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
+               _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
                complete(&rsp->barrier_completion);
+       } else {
+               _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
+       }
 }
 
 /*
@@ -2280,6 +2295,7 @@ static void rcu_barrier_func(void *type)
        struct rcu_state *rsp = type;
        struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
+       _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
        atomic_inc(&rsp->barrier_cpu_count);
        rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 }
@@ -2298,6 +2314,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
        unsigned long snap_done;
 
        init_rcu_head_on_stack(&rd.barrier_head);
+       _rcu_barrier_trace(rsp, "Begin", -1, snap);
 
        /* Take mutex to serialize concurrent rcu_barrier() requests. */
        mutex_lock(&rsp->barrier_mutex);
@@ -2315,7 +2332,9 @@ static void _rcu_barrier(struct rcu_state *rsp)
         * value up to the next even number and adds two before comparing.
         */
        snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+       _rcu_barrier_trace(rsp, "Check", -1, snap_done);
        if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+               _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
                smp_mb(); /* caller's subsequent code after above check. */
                mutex_unlock(&rsp->barrier_mutex);
                return;
@@ -2328,6 +2347,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
         */
        ACCESS_ONCE(rsp->n_barrier_done)++;
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
+       _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
        smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
 
        /*
@@ -2364,13 +2384,19 @@ static void _rcu_barrier(struct rcu_state *rsp)
                preempt_disable();
                rdp = per_cpu_ptr(rsp->rda, cpu);
                if (cpu_is_offline(cpu)) {
+                       _rcu_barrier_trace(rsp, "Offline", cpu,
+                                          rsp->n_barrier_done);
                        preempt_enable();
                        while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
                                schedule_timeout_interruptible(1);
                } else if (ACCESS_ONCE(rdp->qlen)) {
+                       _rcu_barrier_trace(rsp, "OnlineQ", cpu,
+                                          rsp->n_barrier_done);
                        smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
                        preempt_enable();
                } else {
+                       _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
+                                          rsp->n_barrier_done);
                        preempt_enable();
                }
        }
@@ -2403,6 +2429,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
        smp_mb(); /* Keep increment after above mechanism. */
        ACCESS_ONCE(rsp->n_barrier_done)++;
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
+       _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
        smp_mb(); /* Keep increment before caller's subsequent code. */
 
        /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
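
As a worked illustration of the piggyback logic traced above (hypothetical
values): ->n_barrier_done is even when no _rcu_barrier() is in flight and odd
while one is running, as the WARN_ON_ONCE() checks after each increment show.
Suppose a caller samples snap = 4 on entry.  The check rounds up to the next
even value and adds two, ((4 + 1) & ~0x1) + 2 = 6, so the caller may take the
"EarlyExit" path only once the counter has reached 6, meaning another
_rcu_barrier() both started ("Inc1": 4 -> 5) and completed ("Inc2": 5 -> 6)
after the snapshot.  Otherwise it performs the full barrier and both
increments itself.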