perf/x86: Warn to early_printk() in case irq_work is too slow
author Peter Zijlstra <peterz@infradead.org>
Tue, 11 Feb 2014 15:01:16 +0000 (16:01 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Fri, 21 Feb 2014 20:49:07 +0000 (21:49 +0100)
On Mon, Feb 10, 2014 at 08:45:16AM -0800, Dave Hansen wrote:
> The reason I coded this up was that NMIs were firing off so fast that
> nothing else was getting a chance to run.  With this patch, at least the
> printk() would come out and I'd have some idea what was going on.

It will start spewing to early_printk() (which is also a lot nicer to
use from NMI context) when it fails to queue the IRQ work because it is
already enqueued.

It does have a false positive when two CPUs trigger the warning
concurrently, but that should be rare, and some extra clutter in the
early_printk output shouldn't be a problem.
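
For illustration, a minimal sketch (not part of this patch) of how a
caller can use the new boolean return value from NMI context; the names
my_warn_cb, my_warn_work and report_from_nmi are made up for the
example:

    /* Illustrative only -- not part of this commit. */
    #include <linux/irq_work.h>
    #include <linux/printk.h>

    /* Deferred callback, runs from the IRQ-work interrupt, not from NMI. */
    static void my_warn_cb(struct irq_work *w)
    {
            pr_warn("deferred warning ran in IRQ context\n");
    }

    static DEFINE_IRQ_WORK(my_warn_work, my_warn_cb);

    /* Called from NMI context, e.g. a PMU overflow handler. */
    static void report_from_nmi(void)
    {
            if (!irq_work_queue(&my_warn_work)) {
                    /*
                     * Claim failed: the work is still pending from an
                     * earlier (or concurrent) call and its callback has
                     * not run yet.  Fall back to early_printk(), which
                     * is safe to call from NMI context.
                     */
                    early_printk("irq_work still pending, printing directly\n");
            }
    }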

Cc: hpa@zytor.com
Cc: tglx@linutronix.de
Cc: dzickus@redhat.com
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: mingo@kernel.org
Fixes: 6a02ad66b2c4 ("perf/x86: Push the duration-logging printk() to IRQ context")
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140211150116.GO27965@twins.programming.kicks-ass.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/irq_work.h
kernel/events/core.c
kernel/irq_work.c

include/linux/irq_work.h
index add13c8624b701ea9f8e1ccf93d18a9d4ff0f2da..19ae05d4b8ec26b6dc6fa5f568d2b25b698e3417 100644 (file)
@@ -32,7 +32,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 
 #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
 
-void irq_work_queue(struct irq_work *work);
+bool irq_work_queue(struct irq_work *work);
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
 
kernel/events/core.c
index 2067cbb378eb40e7f0dbf348b8d54fe401a1adb9..45e5543e2a1e43dd1657ecc5c7503da28d5dea44 100644 (file)
@@ -243,7 +243,7 @@ static void perf_duration_warn(struct irq_work *w)
        printk_ratelimited(KERN_WARNING
                        "perf interrupt took too long (%lld > %lld), lowering "
                        "kernel.perf_event_max_sample_rate to %d\n",
-                       avg_local_sample_len, allowed_ns,
+                       avg_local_sample_len, allowed_ns >> 1,
                        sysctl_perf_event_sample_rate);
 }
 
@@ -283,7 +283,12 @@ void perf_sample_event_took(u64 sample_len_ns)
 
        update_perf_cpu_limits();
 
-       irq_work_queue(&perf_duration_work);
+       if (!irq_work_queue(&perf_duration_work)) {
+               early_printk("perf interrupt took too long (%lld > %lld), lowering "
+                            "kernel.perf_event_max_sample_rate to %d\n",
+                            avg_local_sample_len, allowed_ns >> 1,
+                            sysctl_perf_event_sample_rate);
+       }
 }
 
 static atomic64_t perf_event_id;
kernel/irq_work.c
index 55fcce6065cf6bc3213829fd8188cae90b61b778..a82170e2fa78e50fbefa29c6cf71b1eccadae175 100644 (file)
@@ -61,11 +61,11 @@ void __weak arch_irq_work_raise(void)
  *
  * Can be re-enqueued while the callback is still in progress.
  */
-void irq_work_queue(struct irq_work *work)
+bool irq_work_queue(struct irq_work *work)
 {
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
-               return;
+               return false;
 
        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
@@ -83,6 +83,8 @@ void irq_work_queue(struct irq_work *work)
        }
 
        preempt_enable();
+
+       return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);