/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
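
/*
 * Illustrative note (not in the original source): one call site feeds both
 * sinks.  sched_debug_show() passes a live seq_file, so
 *
 *	SEQ_printf(m, "cpu#%d\n", cpu);
 *
 * becomes seq_printf() output for /proc/sched_debug, while
 * sysrq_sched_debug_show() passes m == NULL and the very same line goes
 * out through printk() to the console instead.
 */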

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
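
/*
 * Worked example (illustrative, not in the original source): for
 * nsec = 3123456789,
 *
 *	nsec_high(3123456789) == 3123	(whole milliseconds)
 *	nsec_low(3123456789)  == 456789	(remaining nanoseconds)
 *
 * so a "%Ld.%06ld" format fed with SPLIT_NS(nsec) prints "3123.456789",
 * i.e. the value in milliseconds with six fractional digits.  Negative
 * values are negated first so that only nsec_high() carries the sign.
 */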

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	/* The root task_group has no per-cpu sched_entity: */
	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);

#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif
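
/*
 * Illustrative note (not in the original source): group_path is a single
 * static PATH_MAX buffer, so task_group_path() is not reentrant; the
 * printing paths that reach it (print_cfs_rq(), print_rt_rq(),
 * print_task()) are serialized behind sched_debug_lock in print_cpu().
 * A typical result looks like "/" for the root group or e.g.
 * "/system.slice" for a child cgroup.
 */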

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	/* Mark the task currently running on this runqueue: */
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.statistics.wait_sum),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		0LL, 0L,
		SPLIT_NS(p->se.sum_exec_runtime),
		0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
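
/*
 * Illustrative sample of the table printed above (values invented for
 * the example; real output depends on the workload):
 *
 *	R           bash  2154     12345.678901      153   120  ...
 *
 * "R" marks the task currently running on this runqueue, 12345.678901
 * is se.vruntime rendered via SPLIT_NS(), 153 the total number of
 * context switches and 120 the priority.
 */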

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	/* Snapshot the rbtree extremes under the runqueue lock: */
	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
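
/*
 * Worked example (illustrative, not in the original source): with the
 * leftmost queued entity at vruntime 100.000000 ms and the rightmost at
 * 100.750000 ms,
 *
 *	spread = max_vruntime - MIN_vruntime = 0.750000 ms
 *
 * i.e. how far apart the queued entities' virtual runtimes sit, while
 * spread0 compares this runqueue's min_vruntime against CPU0's as a
 * rough cross-CPU skew indicator (negative when this CPU lags behind).
 */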

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		/* Fall back to 1 kHz while cpu_khz is not (yet) calibrated: */
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);
	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
	P64(max_idle_balance_cost);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}
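
/*
 * Illustrative note (not in the original source): the sizeof()-dispatching
 * P() helper in print_cpu() picks the printf format from the field width
 * at compile time, e.g.
 *
 *	P(nr_running);		sizeof == 4 -> "%ld"  via (long)
 *	P(nr_switches);		sizeof == 8 -> "%Ld"  via (long long)
 *
 * so one macro covers both int-sized and u64 runqueue members without
 * per-field format strings.
 */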

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	/* Sample all three clocks back-to-back with IRQs off: */
	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	/* cpu == -1 is the header position handed out by sched_debug_start(): */
	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
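
/*
 * Illustrative walk-through (not in the original source), assuming CPUs
 * {0, 1} online:
 *
 *	*offset == 0 -> returns 1	(header; sched_debug_show() sees cpu == -1)
 *	*offset == 1 -> n = cpumask_first() = 0, returns 0 + 2	(cpu 0)
 *	*offset == 2 -> n = cpumask_next(0) = 1, returns 1 + 2	(cpu 1)
 *	*offset == 3 -> cpumask_next(1) runs off the mask -> NULL (EOF)
 *
 * sched_debug_show() undoes the +2 bias, so the header is rendered
 * whenever the decoded cpu is -1.
 */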

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
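
/*
 * Usage (illustrative, not in the original source): once registered, the
 * whole report can be read with e.g.
 *
 *	$ cat /proc/sched_debug
 *
 * and the same information can be dumped to the console through
 * sysrq_sched_debug_show() above.
 */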

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.sum_sleep_runtime);
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		/* Average length of one scheduling atom: */
		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		/* Average runtime accumulated per CPU migration: */
		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		/* Two back-to-back reads expose the clock's overhead/granularity: */
		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}