printk/sched: Introduce special printk_sched() for those awkward moments
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Mon, 27 Feb 2012 09:47:00 +0000 (10:47 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 12 Mar 2012 19:43:16 +0000 (20:43 +0100)
There are a few awkward printk()s inside the scheduler guts that people
prefer to keep but that are rather deadlock prone. Fudge around it
by storing the text in a per-cpu buffer and polling it from the existing
printk_tick() handler.

This will drop output when it comes more frequently than once a tick; however,
only the affinity message could possibly go that fast, and for that a single
line should suffice to notify the admin that he has done something silly.
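For illustration, a rough userspace analogue of the deferral scheme described
above (hypothetical names; plain C instead of the kernel's per-cpu and IRQ-safe
primitives): the producer formats its message into a fixed buffer and raises a
pending flag, and a periodic tick routine notices the flag and emits the stored
text.

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

#define SCHED_BUF_SIZE 512

static char sched_buf[SCHED_BUF_SIZE];	/* stands in for the per-cpu printk_sched_buf */
static bool sched_pending;		/* stands in for PRINTK_PENDING_SCHED */

/* Producer side: format into the buffer and mark it pending (like printk_sched()). */
static int deferred_print(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vsnprintf(sched_buf, sizeof(sched_buf), fmt, args);
	va_end(args);

	sched_pending = true;
	return r;
}

/* Consumer side: run periodically, like printk_tick(). */
static void tick(void)
{
	if (sched_pending) {
		sched_pending = false;
		printf("[sched_delayed] %s", sched_buf);
	}
}

int main(void)
{
	deferred_print("process %d (%s) no longer affine to cpu%d\n", 42, "demo", 3);
	tick();	/* the stored message only reaches the console here */
	return 0;
}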

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-wua3lmkt3dg8nfts66o6brne@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/printk.h
kernel/printk.c
kernel/sched/core.c
kernel/sched/rt.c

index f0e22f75143f67e3b4d8dc72ba4bc8e81e3a5570..1f77a4174ee022e57ef796cfb6e5b92b190a4163 100644 (file)
@@ -100,6 +100,11 @@ int vprintk(const char *fmt, va_list args);
 asmlinkage __printf(1, 2) __cold
 int printk(const char *fmt, ...);
 
+/*
+ * Special printk facility for scheduler use only, _DO_NOT_USE_ !
+ */
+__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
+
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
  * with all other unrelated printk_ratelimit() callsites.  Instead use
@@ -127,6 +132,11 @@ int printk(const char *s, ...)
 {
        return 0;
 }
+static inline __printf(1, 2) __cold
+int printk_sched(const char *s, ...)
+{
+       return 0;
+}
 static inline int printk_ratelimit(void)
 {
        return 0;
index 13c0a1143f49438f7cc97718e61ecf5bc5b23747..7ca7ba591e21a5544a1be7d585c9d676b09144b2 100644 (file)
@@ -1208,13 +1208,47 @@ int is_console_locked(void)
        return console_locked;
 }
 
+/*
+ * Delayed printk facility, for scheduler-internal messages:
+ */
+#define PRINTK_BUF_SIZE                512
+
+#define PRINTK_PENDING_WAKEUP  0x01
+#define PRINTK_PENDING_SCHED   0x02
+
 static DEFINE_PER_CPU(int, printk_pending);
+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+
+int printk_sched(const char *fmt, ...)
+{
+       unsigned long flags;
+       va_list args;
+       char *buf;
+       int r;
+
+       local_irq_save(flags);
+       buf = __get_cpu_var(printk_sched_buf);
+
+       va_start(args, fmt);
+       r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
+       va_end(args);
+
+       __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
+       local_irq_restore(flags);
+
+       return r;
+}
 
 void printk_tick(void)
 {
        if (__this_cpu_read(printk_pending)) {
-               __this_cpu_write(printk_pending, 0);
-               wake_up_interruptible(&log_wait);
+               int pending = __this_cpu_xchg(printk_pending, 0);
+               if (pending & PRINTK_PENDING_SCHED) {
+                       char *buf = __get_cpu_var(printk_sched_buf);
+                       printk(KERN_WARNING "[sched_delayed] %s", buf);
+               }
+               if (pending & PRINTK_PENDING_WAKEUP)
+                       wake_up_interruptible(&log_wait);
        }
 }
 
@@ -1228,7 +1262,7 @@ int printk_needs_cpu(int cpu)
 void wake_up_klogd(void)
 {
        if (waitqueue_active(&log_wait))
-               this_cpu_write(printk_pending, 1);
+               this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
 }
 
 /**
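With the change above, the per-cpu printk_pending word carries two independent
bits instead of a plain flag: producers OR their bit in, and the tick handler
exchanges the whole word with zero so both kinds of pending work are consumed
in one pass. A minimal userspace sketch of that pattern using C11 atomics
(hypothetical names, not the kernel's per-cpu primitives):

#include <stdatomic.h>
#include <stdio.h>

#define PENDING_WAKEUP	0x01
#define PENDING_SCHED	0x02

static atomic_uint pending;

/* Producers OR their bit into the pending word (like this_cpu_or()). */
static void raise_pending(unsigned int bit)
{
	atomic_fetch_or(&pending, bit);
}

/* The tick takes and clears the whole word in one step (like __this_cpu_xchg()). */
static void tick(void)
{
	unsigned int p = atomic_exchange(&pending, 0);

	if (p & PENDING_SCHED)
		printf("flush deferred scheduler message\n");
	if (p & PENDING_WAKEUP)
		printf("wake up log readers\n");
}

int main(void)
{
	raise_pending(PENDING_SCHED);
	raise_pending(PENDING_WAKEUP);
	tick();	/* both events handled in a single pass */
	return 0;
}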
index b1ccce819ce262b7ce652856d5e09d2fa09d4e76..8781cec7c3e61a2554745dca759ae653a7c2b49d 100644 (file)
@@ -1284,7 +1284,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
         * leave kernel.
         */
        if (p->mm && printk_ratelimit()) {
-               printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
+               printk_sched("process %d (%s) no longer affine to cpu%d\n",
                                task_pid_nr(p), p->comm, cpu);
        }
 
index 7f7e7cdcb472d21c47ab226bbffeb17700b0f3b5..b60dad720173e6f396eb9216dc81d85cf8b5fdf0 100644 (file)
@@ -864,8 +864,14 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
+                       static bool once = false;
+
                        rt_rq->rt_throttled = 1;
-                       printk_once(KERN_WARNING "sched: RT throttling activated\n");
+
+                       if (!once) {
+                               once = true;
+                               printk_sched("sched: RT throttling activated\n");
+                       }
                } else {
                        /*
                         * In case we did anyway, make it go away,
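The open-coded static bool once above stands in for printk_once(), which would
call printk() directly and thus bring back the deadlock risk this patch is
avoiding; the hand-rolled guard keeps the one-shot behaviour while routing the
message through printk_sched(). A reduced sketch of the same guard pattern
(hypothetical names, plain C):

#include <stdbool.h>
#include <stdio.h>

/* One-shot guard: the message is emitted the first time only. */
static void report_rt_throttling(void)
{
	static bool once = false;

	if (!once) {
		once = true;
		printf("sched: RT throttling activated\n");	/* printk_sched() in the real code */
	}
}

int main(void)
{
	report_rt_throttling();
	report_rt_throttling();	/* silent on the second call */
	return 0;
}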