/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

#include "sched.h"

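/* Serializes print_cpu() so per-CPU dumps are not interleaved. */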
static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                printk(x);                      \
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#ifdef CONFIG_FAIR_GROUP_SCHED
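/*
 * Dump the per-CPU sched_entity statistics of a task group; when the
 * group has no entity on this CPU (the root group), fall back to the
 * rq-wide runnable averages.
 */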
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F) \
        SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
        SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

        if (!se) {
                struct sched_avg *avg = &cpu_rq(cpu)->avg;
                P(avg->runnable_avg_sum);
                P(avg->runnable_avg_period);
                return;
        }

        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
        PN(se->statistics.wait_start);
        PN(se->statistics.sleep_start);
        PN(se->statistics.block_start);
        PN(se->statistics.sleep_max);
        PN(se->statistics.block_max);
        PN(se->statistics.exec_max);
        PN(se->statistics.slice_max);
        PN(se->statistics.wait_max);
        PN(se->statistics.wait_sum);
        P(se->statistics.wait_count);
#endif
        P(se->load.weight);
#ifdef CONFIG_SMP
        P(se->avg.runnable_avg_sum);
        P(se->avg.runnable_avg_period);
        P(se->avg.load_avg_contrib);
        P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

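/* Resolve the cgroup (or autogroup) path of a task group into group_path. */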
static char *task_group_path(struct task_group *tg)
{
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;

        /*
         * May be NULL if the underlying cgroup isn't fully-created yet
         */
        if (!tg->css.cgroup) {
                group_path[0] = '\0';
                return group_path;
        }
        cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
        return group_path;
}
#endif

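/*
 * Print one row of the "runnable tasks" table: an 'R' marker for the
 * task currently running on the rq, followed by comm, pid, vruntime,
 * total context switches, priority and (with SCHEDSTATS) runtimes.
 */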
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, "R");
        else
                SEQ_printf(m, " ");

        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, p->pid,
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
#ifdef CONFIG_SCHEDSTATS
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(p->se.vruntime),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
        SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
                0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_SCHED_NUMA
        SEQ_printf(m, " %d/%d", p->node, cpu_to_node(task_cpu(p)));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

        SEQ_printf(m, "\n");
}

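/* Walk every thread in the system and print those queued on @rq_cpu. */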
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;
        unsigned long flags;

        SEQ_printf(m,
        "\nrunnable tasks:\n"
        "            task   PID         tree-key  switches  prio"
        "     exec-runtime         sum-exec        sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");

        read_lock_irqsave(&tasklist_lock, flags);

        do_each_thread(g, p) {
                if (!p->on_rq || task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        } while_each_thread(g, p);

        read_unlock_irqrestore(&tasklist_lock, flags);
}

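/*
 * Dump one CFS runqueue: min/max vruntime, spread, load and (for group
 * scheduling) the per-group load-tracking statistics.
 */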
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
        SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (cfs_rq->rb_leftmost)
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %lld\n", "runnable_load_avg",
                        cfs_rq->runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
                        cfs_rq->blocked_load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic64_read(&cfs_rq->tg->load_avg));
        SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
                        cfs_rq->tg_load_contrib);
        SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
                        cfs_rq->tg_runnable_contrib);
        SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
                        atomic_read(&cfs_rq->tg->runnable_avg));
#endif

        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

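/* Dump one RT runqueue: running count, throttling state and bandwidth. */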
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
        SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        P(rt_nr_running);
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

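/*
 * Dump the state of one CPU's runqueue, its CFS and RT runqueues and the
 * tasks currently runnable on it (serialized by sched_debug_lock).
 */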
static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "\ncpu#%d\n", cpu);
#endif

#define P(x)                                                            \
do {                                                                    \
        if (sizeof(rq->x) == 4)                                         \
                SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));    \
        else                                                            \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        SEQ_printf(m, "  .%-30s: %lu\n", "load",
                   rq->load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
        P(curr->pid);
        PN(clock);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
        P(cpu_load[3]);
        P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

        P(yld_count);

        P(sched_count);
        P(sched_goidle);
#ifdef CONFIG_SMP
        P64(avg_idle);
#endif

        P(ttwu_count);
        P(ttwu_local);

#undef P
#undef P64
#endif
        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);

        rcu_read_lock();
        print_rq(m, rq, cpu);
        rcu_read_unlock();
        spin_unlock_irqrestore(&sched_debug_lock, flags);
}

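/* Human-readable names for the sysctl_sched_tunable_scaling values. */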
static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};

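/*
 * seq_file show handler for /proc/sched_debug: clocks, sysctl values and
 * a per-CPU dump for every online CPU.
 */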
static int sched_debug_show(struct seq_file *m, void *v)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;
        int cpu;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable);
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);

        for_each_online_cpu(cpu)
                print_cpu(m, cpu);

        SEQ_printf(m, "\n");

        return 0;
}

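/* Sysrq entry point: a NULL seq_file makes SEQ_printf() fall back to printk. */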
void sysrq_sched_debug_show(void)
{
        sched_debug_show(NULL, NULL);
}

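/* Bind the seq_file single_open() machinery to sched_debug_show(). */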
static int sched_debug_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_debug_show, NULL);
}

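/* File operations for /proc/sched_debug. */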
static const struct file_operations sched_debug_fops = {
        .open           = sched_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

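/* Create the read-only /proc/sched_debug entry at boot. */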
static int __init init_sched_debug_procfs(void)
{
        struct proc_dir_entry *pe;

        pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
        if (!pe)
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);

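/*
 * Back-end of /proc/<pid>/sched: dump per-task scheduler state and
 * statistics for a single task.
 */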
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------\n");
#define __P(F) \
        SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
        SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
        PN(se.statistics.wait_start);
        PN(se.statistics.sleep_start);
        PN(se.statistics.block_start);
        PN(se.statistics.sleep_max);
        PN(se.statistics.block_max);
        PN(se.statistics.exec_max);
        PN(se.statistics.slice_max);
        PN(se.statistics.wait_max);
        PN(se.statistics.wait_sum);
        P(se.statistics.wait_count);
        PN(se.statistics.iowait_sum);
        P(se.statistics.iowait_count);
        P(se.nr_migrations);
        P(se.statistics.nr_migrations_cold);
        P(se.statistics.nr_failed_migrations_affine);
        P(se.statistics.nr_failed_migrations_running);
        P(se.statistics.nr_failed_migrations_hot);
        P(se.statistics.nr_forced_migrations);
        P(se.statistics.nr_wakeups);
        P(se.statistics.nr_wakeups_sync);
        P(se.statistics.nr_wakeups_migrate);
        P(se.statistics.nr_wakeups_local);
        P(se.statistics.nr_wakeups_remote);
        P(se.statistics.nr_wakeups_affine);
        P(se.statistics.nr_wakeups_affine_attempts);
        P(se.statistics.nr_wakeups_passive);
        P(se.statistics.nr_wakeups_idle);

        {
                u64 avg_atom, avg_per_cpu;

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        do_div(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }
#endif
        __P(nr_switches);
        SEQ_printf(m, "%-35s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
        SEQ_printf(m, "%-35s:%21Ld\n",
                   "nr_involuntary_switches", (long long)p->nivcsw);

        P(se.load.weight);
        P(policy);
        P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                SEQ_printf(m, "%-35s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }
}

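/* Writing to /proc/<pid>/sched resets the task's schedstats counters. */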
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}