2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
23 #include <linux/sched/mm.h>
24 #include <linux/sched/topology.h>
26 #include <linux/latencytop.h>
27 #include <linux/cpumask.h>
28 #include <linux/cpuidle.h>
29 #include <linux/slab.h>
30 #include <linux/profile.h>
31 #include <linux/interrupt.h>
32 #include <linux/mempolicy.h>
33 #include <linux/migrate.h>
34 #include <linux/task_work.h>
36 #include <trace/events/sched.h>
41 * Targeted preemption latency for CPU-bound tasks:
43 * NOTE: this latency value is not the same as the concept of
44 * 'timeslice length' - timeslices in CFS are of variable length
45 * and have no persistent notion like in traditional, time-slice
46 * based scheduling concepts.
48 * (to see the precise effective timeslice length of your workload,
49 * run vmstat and monitor the context-switches (cs) field)
51 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
53 unsigned int sysctl_sched_latency = 6000000ULL;
54 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
57 * The initial- and re-scaling of tunables is configurable
61 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
62 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
63 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
65 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
67 enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
70 * Minimal preemption granularity for CPU-bound tasks:
72 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
74 unsigned int sysctl_sched_min_granularity = 750000ULL;
75 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
78 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
80 static unsigned int sched_nr_latency = 8;
83 * After fork, child runs first. If set to 0 (default) then
84 * parent will (try to) run first.
86 unsigned int sysctl_sched_child_runs_first __read_mostly;
89 * SCHED_OTHER wake-up granularity.
91 * This option delays the preemption effects of decoupled workloads
92 * and reduces their over-scheduling. Synchronous workloads will still
93 * have immediate wakeup/sleep latencies.
95 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
97 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
98 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
100 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
104 * For asym packing, by default the lower numbered cpu has higher priority.
106 int __weak arch_asym_cpu_priority(int cpu)
112 #ifdef CONFIG_CFS_BANDWIDTH
114 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
115 * each time a cfs_rq requests quota.
117 * Note: in the case that the slice exceeds the runtime remaining (either due
118 * to consumption or the quota being specified to be smaller than the slice)
119 * we will always only issue the remaining available time.
121 * (default: 5 msec, units: microseconds)
123 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
127 * The margin used when comparing utilization with CPU capacity:
128 * util * margin < capacity * 1024
132 unsigned int capacity_margin = 1280;
134 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
140 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
146 static inline void update_load_set(struct load_weight *lw, unsigned long w)
153 * Increase the granularity value when there are more CPUs,
154 * because with more CPUs the 'effective latency' as visible
155 * to users decreases. But the relationship is not linear,
156 * so pick a second-best guess by going with the log2 of the
159 * This idea comes from the SD scheduler of Con Kolivas:
161 static unsigned int get_update_sysctl_factor(void)
163 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
166 switch (sysctl_sched_tunable_scaling) {
167 case SCHED_TUNABLESCALING_NONE:
170 case SCHED_TUNABLESCALING_LINEAR:
173 case SCHED_TUNABLESCALING_LOG:
175 factor = 1 + ilog2(cpus);
182 static void update_sysctl(void)
184 unsigned int factor = get_update_sysctl_factor();
186 #define SET_SYSCTL(name) \
187 (sysctl_##name = (factor) * normalized_sysctl_##name)
188 SET_SYSCTL(sched_min_granularity);
189 SET_SYSCTL(sched_latency);
190 SET_SYSCTL(sched_wakeup_granularity);
194 void sched_init_granularity(void)
199 #define WMULT_CONST (~0U)
200 #define WMULT_SHIFT 32
202 static void __update_inv_weight(struct load_weight *lw)
206 if (likely(lw->inv_weight))
209 w = scale_load_down(lw->weight);
211 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
213 else if (unlikely(!w))
214 lw->inv_weight = WMULT_CONST;
216 lw->inv_weight = WMULT_CONST / w;
220 * delta_exec * weight / lw.weight
222 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
224 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
225 * we're guaranteed shift stays positive because inv_weight is guaranteed to
226 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
228 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
229 * weight/lw.weight <= 1, and therefore our shift will also be positive.
231 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
233 u64 fact = scale_load_down(weight);
234 int shift = WMULT_SHIFT;
236 __update_inv_weight(lw);
238 if (unlikely(fact >> 32)) {
245 /* hint to use a 32x32->64 mul */
246 fact = (u64)(u32)fact * lw->inv_weight;
253 return mul_u64_u32_shr(delta_exec, fact, shift);
257 const struct sched_class fair_sched_class;
259 /**************************************************************
260 * CFS operations on generic schedulable entities:
263 #ifdef CONFIG_FAIR_GROUP_SCHED
265 /* cpu runqueue to which this cfs_rq is attached */
266 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
271 /* An entity is a task if it doesn't "own" a runqueue */
272 #define entity_is_task(se) (!se->my_q)
274 static inline struct task_struct *task_of(struct sched_entity *se)
276 SCHED_WARN_ON(!entity_is_task(se));
277 return container_of(se, struct task_struct, se);
280 /* Walk up scheduling entities hierarchy */
281 #define for_each_sched_entity(se) \
282 for (; se; se = se->parent)
284 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
289 /* runqueue on which this entity is (to be) queued */
290 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
295 /* runqueue "owned" by this group */
296 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
301 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
303 if (!cfs_rq->on_list) {
304 struct rq *rq = rq_of(cfs_rq);
305 int cpu = cpu_of(rq);
307 * Ensure we either appear before our parent (if already
308 * enqueued) or force our parent to appear after us when it is
309 * enqueued. The fact that we always enqueue bottom-up
310 * reduces this to two cases and a special case for the root
311 * cfs_rq. Furthermore, it also means that we will always reset
312 * tmp_alone_branch either when the branch is connected
313 * to a tree or when we reach the beg of the tree
315 if (cfs_rq->tg->parent &&
316 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
318 * If parent is already on the list, we add the child
319 * just before. Thanks to circular linked property of
320 * the list, this means to put the child at the tail
321 * of the list that starts by parent.
323 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
324 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
326 * The branch is now connected to its tree so we can
327 * reset tmp_alone_branch to the beginning of the
330 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
331 } else if (!cfs_rq->tg->parent) {
333 * cfs rq without parent should be put
334 * at the tail of the list.
336 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
337 &rq->leaf_cfs_rq_list);
339 * We have reach the beg of a tree so we can reset
340 * tmp_alone_branch to the beginning of the list.
342 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
345 * The parent has not already been added so we want to
346 * make sure that it will be put after us.
347 * tmp_alone_branch points to the beg of the branch
348 * where we will add parent.
350 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
351 rq->tmp_alone_branch);
353 * update tmp_alone_branch to points to the new beg
356 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
363 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
365 if (cfs_rq->on_list) {
366 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
371 /* Iterate thr' all leaf cfs_rq's on a runqueue */
372 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
373 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
375 /* Do the two (enqueued) entities belong to the same group ? */
376 static inline struct cfs_rq *
377 is_same_group(struct sched_entity *se, struct sched_entity *pse)
379 if (se->cfs_rq == pse->cfs_rq)
385 static inline struct sched_entity *parent_entity(struct sched_entity *se)
391 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
393 int se_depth, pse_depth;
396 * preemption test can be made between sibling entities who are in the
397 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
398 * both tasks until we find their ancestors who are siblings of common
402 /* First walk up until both entities are at same depth */
403 se_depth = (*se)->depth;
404 pse_depth = (*pse)->depth;
406 while (se_depth > pse_depth) {
408 *se = parent_entity(*se);
411 while (pse_depth > se_depth) {
413 *pse = parent_entity(*pse);
416 while (!is_same_group(*se, *pse)) {
417 *se = parent_entity(*se);
418 *pse = parent_entity(*pse);
422 #else /* !CONFIG_FAIR_GROUP_SCHED */
424 static inline struct task_struct *task_of(struct sched_entity *se)
426 return container_of(se, struct task_struct, se);
429 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
431 return container_of(cfs_rq, struct rq, cfs);
434 #define entity_is_task(se) 1
436 #define for_each_sched_entity(se) \
437 for (; se; se = NULL)
439 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
441 return &task_rq(p)->cfs;
444 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
446 struct task_struct *p = task_of(se);
447 struct rq *rq = task_rq(p);
452 /* runqueue "owned" by this group */
453 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
458 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
462 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
466 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
467 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
469 static inline struct sched_entity *parent_entity(struct sched_entity *se)
475 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
479 #endif /* CONFIG_FAIR_GROUP_SCHED */
481 static __always_inline
482 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
484 /**************************************************************
485 * Scheduling class tree data structure manipulation methods:
488 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
490 s64 delta = (s64)(vruntime - max_vruntime);
492 max_vruntime = vruntime;
497 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
499 s64 delta = (s64)(vruntime - min_vruntime);
501 min_vruntime = vruntime;
506 static inline int entity_before(struct sched_entity *a,
507 struct sched_entity *b)
509 return (s64)(a->vruntime - b->vruntime) < 0;
512 static void update_min_vruntime(struct cfs_rq *cfs_rq)
514 struct sched_entity *curr = cfs_rq->curr;
516 u64 vruntime = cfs_rq->min_vruntime;
520 vruntime = curr->vruntime;
525 if (cfs_rq->rb_leftmost) {
526 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
531 vruntime = se->vruntime;
533 vruntime = min_vruntime(vruntime, se->vruntime);
536 /* ensure we never gain time by being placed backwards. */
537 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
540 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
545 * Enqueue an entity into the rb-tree:
547 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
549 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
550 struct rb_node *parent = NULL;
551 struct sched_entity *entry;
555 * Find the right place in the rbtree:
559 entry = rb_entry(parent, struct sched_entity, run_node);
561 * We dont care about collisions. Nodes with
562 * the same key stay together.
564 if (entity_before(se, entry)) {
565 link = &parent->rb_left;
567 link = &parent->rb_right;
573 * Maintain a cache of leftmost tree entries (it is frequently
577 cfs_rq->rb_leftmost = &se->run_node;
579 rb_link_node(&se->run_node, parent, link);
580 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
583 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
585 if (cfs_rq->rb_leftmost == &se->run_node) {
586 struct rb_node *next_node;
588 next_node = rb_next(&se->run_node);
589 cfs_rq->rb_leftmost = next_node;
592 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
595 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
597 struct rb_node *left = cfs_rq->rb_leftmost;
602 return rb_entry(left, struct sched_entity, run_node);
605 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
607 struct rb_node *next = rb_next(&se->run_node);
612 return rb_entry(next, struct sched_entity, run_node);
615 #ifdef CONFIG_SCHED_DEBUG
616 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
618 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
623 return rb_entry(last, struct sched_entity, run_node);
626 /**************************************************************
627 * Scheduling class statistics methods:
630 int sched_proc_update_handler(struct ctl_table *table, int write,
631 void __user *buffer, size_t *lenp,
634 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
635 unsigned int factor = get_update_sysctl_factor();
640 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
641 sysctl_sched_min_granularity);
643 #define WRT_SYSCTL(name) \
644 (normalized_sysctl_##name = sysctl_##name / (factor))
645 WRT_SYSCTL(sched_min_granularity);
646 WRT_SYSCTL(sched_latency);
647 WRT_SYSCTL(sched_wakeup_granularity);
657 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
659 if (unlikely(se->load.weight != NICE_0_LOAD))
660 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
666 * The idea is to set a period in which each task runs once.
668 * When there are too many tasks (sched_nr_latency) we have to stretch
669 * this period because otherwise the slices get too small.
671 * p = (nr <= nl) ? l : l*nr/nl
673 static u64 __sched_period(unsigned long nr_running)
675 if (unlikely(nr_running > sched_nr_latency))
676 return nr_running * sysctl_sched_min_granularity;
678 return sysctl_sched_latency;
682 * We calculate the wall-time slice from the period by taking a part
683 * proportional to the weight.
687 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
689 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
691 for_each_sched_entity(se) {
692 struct load_weight *load;
693 struct load_weight lw;
695 cfs_rq = cfs_rq_of(se);
696 load = &cfs_rq->load;
698 if (unlikely(!se->on_rq)) {
701 update_load_add(&lw, se->load.weight);
704 slice = __calc_delta(slice, se->load.weight, load);
710 * We calculate the vruntime slice of a to-be-inserted task.
714 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
716 return calc_delta_fair(sched_slice(cfs_rq, se), se);
720 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
721 static unsigned long task_h_load(struct task_struct *p);
724 * We choose a half-life close to 1 scheduling period.
725 * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
726 * dependent on this value.
728 #define LOAD_AVG_PERIOD 32
729 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
730 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
732 /* Give new sched_entity start runnable values to heavy its load in infant time */
733 void init_entity_runnable_average(struct sched_entity *se)
735 struct sched_avg *sa = &se->avg;
737 sa->last_update_time = 0;
739 * sched_avg's period_contrib should be strictly less then 1024, so
740 * we give it 1023 to make sure it is almost a period (1024us), and
741 * will definitely be update (after enqueue).
743 sa->period_contrib = 1023;
745 * Tasks are intialized with full load to be seen as heavy tasks until
746 * they get a chance to stabilize to their real load level.
747 * Group entities are intialized with zero load to reflect the fact that
748 * nothing has been attached to the task group yet.
750 if (entity_is_task(se))
751 sa->load_avg = scale_load_down(se->load.weight);
752 sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
754 * At this point, util_avg won't be used in select_task_rq_fair anyway
758 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
761 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
762 static void attach_entity_cfs_rq(struct sched_entity *se);
765 * With new tasks being created, their initial util_avgs are extrapolated
766 * based on the cfs_rq's current util_avg:
768 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
770 * However, in many cases, the above util_avg does not give a desired
771 * value. Moreover, the sum of the util_avgs may be divergent, such
772 * as when the series is a harmonic series.
774 * To solve this problem, we also cap the util_avg of successive tasks to
775 * only 1/2 of the left utilization budget:
777 * util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
779 * where n denotes the nth task.
781 * For example, a simplest series from the beginning would be like:
783 * task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
784 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
786 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
787 * if util_avg > util_avg_cap.
789 void post_init_entity_util_avg(struct sched_entity *se)
791 struct cfs_rq *cfs_rq = cfs_rq_of(se);
792 struct sched_avg *sa = &se->avg;
793 long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
796 if (cfs_rq->avg.util_avg != 0) {
797 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
798 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
800 if (sa->util_avg > cap)
805 sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
808 if (entity_is_task(se)) {
809 struct task_struct *p = task_of(se);
810 if (p->sched_class != &fair_sched_class) {
812 * For !fair tasks do:
814 update_cfs_rq_load_avg(now, cfs_rq, false);
815 attach_entity_load_avg(cfs_rq, se);
816 switched_from_fair(rq, p);
818 * such that the next switched_to_fair() has the
821 se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
826 attach_entity_cfs_rq(se);
829 #else /* !CONFIG_SMP */
830 void init_entity_runnable_average(struct sched_entity *se)
833 void post_init_entity_util_avg(struct sched_entity *se)
836 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
839 #endif /* CONFIG_SMP */
842 * Update the current task's runtime statistics.
844 static void update_curr(struct cfs_rq *cfs_rq)
846 struct sched_entity *curr = cfs_rq->curr;
847 u64 now = rq_clock_task(rq_of(cfs_rq));
853 delta_exec = now - curr->exec_start;
854 if (unlikely((s64)delta_exec <= 0))
857 curr->exec_start = now;
859 schedstat_set(curr->statistics.exec_max,
860 max(delta_exec, curr->statistics.exec_max));
862 curr->sum_exec_runtime += delta_exec;
863 schedstat_add(cfs_rq->exec_clock, delta_exec);
865 curr->vruntime += calc_delta_fair(delta_exec, curr);
866 update_min_vruntime(cfs_rq);
868 if (entity_is_task(curr)) {
869 struct task_struct *curtask = task_of(curr);
871 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
872 cpuacct_charge(curtask, delta_exec);
873 account_group_exec_runtime(curtask, delta_exec);
876 account_cfs_rq_runtime(cfs_rq, delta_exec);
879 static void update_curr_fair(struct rq *rq)
881 update_curr(cfs_rq_of(&rq->curr->se));
885 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
887 u64 wait_start, prev_wait_start;
889 if (!schedstat_enabled())
892 wait_start = rq_clock(rq_of(cfs_rq));
893 prev_wait_start = schedstat_val(se->statistics.wait_start);
895 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
896 likely(wait_start > prev_wait_start))
897 wait_start -= prev_wait_start;
899 schedstat_set(se->statistics.wait_start, wait_start);
903 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
905 struct task_struct *p;
908 if (!schedstat_enabled())
911 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
913 if (entity_is_task(se)) {
915 if (task_on_rq_migrating(p)) {
917 * Preserve migrating task's wait time so wait_start
918 * time stamp can be adjusted to accumulate wait time
919 * prior to migration.
921 schedstat_set(se->statistics.wait_start, delta);
924 trace_sched_stat_wait(p, delta);
927 schedstat_set(se->statistics.wait_max,
928 max(schedstat_val(se->statistics.wait_max), delta));
929 schedstat_inc(se->statistics.wait_count);
930 schedstat_add(se->statistics.wait_sum, delta);
931 schedstat_set(se->statistics.wait_start, 0);
935 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
937 struct task_struct *tsk = NULL;
938 u64 sleep_start, block_start;
940 if (!schedstat_enabled())
943 sleep_start = schedstat_val(se->statistics.sleep_start);
944 block_start = schedstat_val(se->statistics.block_start);
946 if (entity_is_task(se))
950 u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
955 if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
956 schedstat_set(se->statistics.sleep_max, delta);
958 schedstat_set(se->statistics.sleep_start, 0);
959 schedstat_add(se->statistics.sum_sleep_runtime, delta);
962 account_scheduler_latency(tsk, delta >> 10, 1);
963 trace_sched_stat_sleep(tsk, delta);
967 u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
972 if (unlikely(delta > schedstat_val(se->statistics.block_max)))
973 schedstat_set(se->statistics.block_max, delta);
975 schedstat_set(se->statistics.block_start, 0);
976 schedstat_add(se->statistics.sum_sleep_runtime, delta);
979 if (tsk->in_iowait) {
980 schedstat_add(se->statistics.iowait_sum, delta);
981 schedstat_inc(se->statistics.iowait_count);
982 trace_sched_stat_iowait(tsk, delta);
985 trace_sched_stat_blocked(tsk, delta);
988 * Blocking time is in units of nanosecs, so shift by
989 * 20 to get a milliseconds-range estimation of the
990 * amount of time that the task spent sleeping:
992 if (unlikely(prof_on == SLEEP_PROFILING)) {
993 profile_hits(SLEEP_PROFILING,
994 (void *)get_wchan(tsk),
997 account_scheduler_latency(tsk, delta >> 10, 0);
1003 * Task is being enqueued - update stats:
1006 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1008 if (!schedstat_enabled())
1012 * Are we enqueueing a waiting task? (for current tasks
1013 * a dequeue/enqueue event is a NOP)
1015 if (se != cfs_rq->curr)
1016 update_stats_wait_start(cfs_rq, se);
1018 if (flags & ENQUEUE_WAKEUP)
1019 update_stats_enqueue_sleeper(cfs_rq, se);
1023 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1026 if (!schedstat_enabled())
1030 * Mark the end of the wait period if dequeueing a
1033 if (se != cfs_rq->curr)
1034 update_stats_wait_end(cfs_rq, se);
1036 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1037 struct task_struct *tsk = task_of(se);
1039 if (tsk->state & TASK_INTERRUPTIBLE)
1040 schedstat_set(se->statistics.sleep_start,
1041 rq_clock(rq_of(cfs_rq)));
1042 if (tsk->state & TASK_UNINTERRUPTIBLE)
1043 schedstat_set(se->statistics.block_start,
1044 rq_clock(rq_of(cfs_rq)));
1049 * We are picking a new current task - update its stats:
1052 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1055 * We are starting a new run period:
1057 se->exec_start = rq_clock_task(rq_of(cfs_rq));
1060 /**************************************************
1061 * Scheduling class queueing methods:
1064 #ifdef CONFIG_NUMA_BALANCING
1066 * Approximate time to scan a full NUMA task in ms. The task scan period is
1067 * calculated based on the tasks virtual memory size and
1068 * numa_balancing_scan_size.
1070 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
1071 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
1073 /* Portion of address space to scan in MB */
1074 unsigned int sysctl_numa_balancing_scan_size = 256;
1076 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1077 unsigned int sysctl_numa_balancing_scan_delay = 1000;
1079 static unsigned int task_nr_scan_windows(struct task_struct *p)
1081 unsigned long rss = 0;
1082 unsigned long nr_scan_pages;
1085 * Calculations based on RSS as non-present and empty pages are skipped
1086 * by the PTE scanner and NUMA hinting faults should be trapped based
1089 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1090 rss = get_mm_rss(p->mm);
1092 rss = nr_scan_pages;
1094 rss = round_up(rss, nr_scan_pages);
1095 return rss / nr_scan_pages;
1098 /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1099 #define MAX_SCAN_WINDOW 2560
1101 static unsigned int task_scan_min(struct task_struct *p)
1103 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
1104 unsigned int scan, floor;
1105 unsigned int windows = 1;
1107 if (scan_size < MAX_SCAN_WINDOW)
1108 windows = MAX_SCAN_WINDOW / scan_size;
1109 floor = 1000 / windows;
1111 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
1112 return max_t(unsigned int, floor, scan);
1115 static unsigned int task_scan_max(struct task_struct *p)
1117 unsigned int smin = task_scan_min(p);
1120 /* Watch for min being lower than max due to floor calculations */
1121 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
1122 return max(smin, smax);
1125 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1127 rq->nr_numa_running += (p->numa_preferred_nid != -1);
1128 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1131 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1133 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
1134 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1140 spinlock_t lock; /* nr_tasks, tasks */
1145 struct rcu_head rcu;
1146 unsigned long total_faults;
1147 unsigned long max_faults_cpu;
1149 * Faults_cpu is used to decide whether memory should move
1150 * towards the CPU. As a consequence, these stats are weighted
1151 * more by CPU use than by memory faults.
1153 unsigned long *faults_cpu;
1154 unsigned long faults[0];
1157 /* Shared or private faults. */
1158 #define NR_NUMA_HINT_FAULT_TYPES 2
1160 /* Memory and CPU locality */
1161 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1163 /* Averaged statistics, and temporary buffers. */
1164 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1166 pid_t task_numa_group_id(struct task_struct *p)
1168 return p->numa_group ? p->numa_group->gid : 0;
1172 * The averaged statistics, shared & private, memory & cpu,
1173 * occupy the first half of the array. The second half of the
1174 * array is for current counters, which are averaged into the
1175 * first set by task_numa_placement.
1177 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
1179 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
1182 static inline unsigned long task_faults(struct task_struct *p, int nid)
1184 if (!p->numa_faults)
1187 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1188 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1191 static inline unsigned long group_faults(struct task_struct *p, int nid)
1196 return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1197 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1200 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1202 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
1203 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
1207 * A node triggering more than 1/3 as many NUMA faults as the maximum is
1208 * considered part of a numa group's pseudo-interleaving set. Migrations
1209 * between these nodes are slowed down, to allow things to settle down.
1211 #define ACTIVE_NODE_FRACTION 3
1213 static bool numa_is_active_node(int nid, struct numa_group *ng)
1215 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1218 /* Handle placement on systems where not all nodes are directly connected. */
1219 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1220 int maxdist, bool task)
1222 unsigned long score = 0;
1226 * All nodes are directly connected, and the same distance
1227 * from each other. No need for fancy placement algorithms.
1229 if (sched_numa_topology_type == NUMA_DIRECT)
1233 * This code is called for each node, introducing N^2 complexity,
1234 * which should be ok given the number of nodes rarely exceeds 8.
1236 for_each_online_node(node) {
1237 unsigned long faults;
1238 int dist = node_distance(nid, node);
1241 * The furthest away nodes in the system are not interesting
1242 * for placement; nid was already counted.
1244 if (dist == sched_max_numa_distance || node == nid)
1248 * On systems with a backplane NUMA topology, compare groups
1249 * of nodes, and move tasks towards the group with the most
1250 * memory accesses. When comparing two nodes at distance
1251 * "hoplimit", only nodes closer by than "hoplimit" are part
1252 * of each group. Skip other nodes.
1254 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1258 /* Add up the faults from nearby nodes. */
1260 faults = task_faults(p, node);
1262 faults = group_faults(p, node);
1265 * On systems with a glueless mesh NUMA topology, there are
1266 * no fixed "groups of nodes". Instead, nodes that are not
1267 * directly connected bounce traffic through intermediate
1268 * nodes; a numa_group can occupy any set of nodes.
1269 * The further away a node is, the less the faults count.
1270 * This seems to result in good task placement.
1272 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1273 faults *= (sched_max_numa_distance - dist);
1274 faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
1284 * These return the fraction of accesses done by a particular task, or
1285 * task group, on a particular numa node. The group weight is given a
1286 * larger multiplier, in order to group tasks together that are almost
1287 * evenly spread out between numa nodes.
1289 static inline unsigned long task_weight(struct task_struct *p, int nid,
1292 unsigned long faults, total_faults;
1294 if (!p->numa_faults)
1297 total_faults = p->total_numa_faults;
1302 faults = task_faults(p, nid);
1303 faults += score_nearby_nodes(p, nid, dist, true);
1305 return 1000 * faults / total_faults;
1308 static inline unsigned long group_weight(struct task_struct *p, int nid,
1311 unsigned long faults, total_faults;
1316 total_faults = p->numa_group->total_faults;
1321 faults = group_faults(p, nid);
1322 faults += score_nearby_nodes(p, nid, dist, false);
1324 return 1000 * faults / total_faults;
1327 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1328 int src_nid, int dst_cpu)
1330 struct numa_group *ng = p->numa_group;
1331 int dst_nid = cpu_to_node(dst_cpu);
1332 int last_cpupid, this_cpupid;
1334 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1337 * Multi-stage node selection is used in conjunction with a periodic
1338 * migration fault to build a temporal task<->page relation. By using
1339 * a two-stage filter we remove short/unlikely relations.
1341 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1342 * a task's usage of a particular page (n_p) per total usage of this
1343 * page (n_t) (in a given time-span) to a probability.
1345 * Our periodic faults will sample this probability and getting the
1346 * same result twice in a row, given these samples are fully
1347 * independent, is then given by P(n)^2, provided our sample period
1348 * is sufficiently short compared to the usage pattern.
1350 * This quadric squishes small probabilities, making it less likely we
1351 * act on an unlikely task<->page relation.
1353 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1354 if (!cpupid_pid_unset(last_cpupid) &&
1355 cpupid_to_nid(last_cpupid) != dst_nid)
1358 /* Always allow migrate on private faults */
1359 if (cpupid_match_pid(p, last_cpupid))
1362 /* A shared fault, but p->numa_group has not been set up yet. */
1367 * Destination node is much more heavily used than the source
1368 * node? Allow migration.
1370 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1371 ACTIVE_NODE_FRACTION)
1375 * Distribute memory according to CPU & memory use on each node,
1376 * with 3/4 hysteresis to avoid unnecessary memory migrations:
1378 * faults_cpu(dst) 3 faults_cpu(src)
1379 * --------------- * - > ---------------
1380 * faults_mem(dst) 4 faults_mem(src)
1382 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1383 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1386 static unsigned long weighted_cpuload(const int cpu);
1387 static unsigned long source_load(int cpu, int type);
1388 static unsigned long target_load(int cpu, int type);
1389 static unsigned long capacity_of(int cpu);
1390 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1392 /* Cached statistics for all CPUs within a node */
1394 unsigned long nr_running;
1397 /* Total compute capacity of CPUs on a node */
1398 unsigned long compute_capacity;
1400 /* Approximate capacity in terms of runnable tasks on a node */
1401 unsigned long task_capacity;
1402 int has_free_capacity;
1406 * XXX borrowed from update_sg_lb_stats
1408 static void update_numa_stats(struct numa_stats *ns, int nid)
1410 int smt, cpu, cpus = 0;
1411 unsigned long capacity;
1413 memset(ns, 0, sizeof(*ns));
1414 for_each_cpu(cpu, cpumask_of_node(nid)) {
1415 struct rq *rq = cpu_rq(cpu);
1417 ns->nr_running += rq->nr_running;
1418 ns->load += weighted_cpuload(cpu);
1419 ns->compute_capacity += capacity_of(cpu);
1425 * If we raced with hotplug and there are no CPUs left in our mask
1426 * the @ns structure is NULL'ed and task_numa_compare() will
1427 * not find this node attractive.
1429 * We'll either bail at !has_free_capacity, or we'll detect a huge
1430 * imbalance and bail there.
1435 /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1436 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1437 capacity = cpus / smt; /* cores */
1439 ns->task_capacity = min_t(unsigned, capacity,
1440 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1441 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1444 struct task_numa_env {
1445 struct task_struct *p;
1447 int src_cpu, src_nid;
1448 int dst_cpu, dst_nid;
1450 struct numa_stats src_stats, dst_stats;
1455 struct task_struct *best_task;
1460 static void task_numa_assign(struct task_numa_env *env,
1461 struct task_struct *p, long imp)
1464 put_task_struct(env->best_task);
1469 env->best_imp = imp;
1470 env->best_cpu = env->dst_cpu;
1473 static bool load_too_imbalanced(long src_load, long dst_load,
1474 struct task_numa_env *env)
1477 long orig_src_load, orig_dst_load;
1478 long src_capacity, dst_capacity;
1481 * The load is corrected for the CPU capacity available on each node.
1484 * ------------ vs ---------
1485 * src_capacity dst_capacity
1487 src_capacity = env->src_stats.compute_capacity;
1488 dst_capacity = env->dst_stats.compute_capacity;
1490 /* We care about the slope of the imbalance, not the direction. */
1491 if (dst_load < src_load)
1492 swap(dst_load, src_load);
1494 /* Is the difference below the threshold? */
1495 imb = dst_load * src_capacity * 100 -
1496 src_load * dst_capacity * env->imbalance_pct;
1501 * The imbalance is above the allowed threshold.
1502 * Compare it with the old imbalance.
1504 orig_src_load = env->src_stats.load;
1505 orig_dst_load = env->dst_stats.load;
1507 if (orig_dst_load < orig_src_load)
1508 swap(orig_dst_load, orig_src_load);
1510 old_imb = orig_dst_load * src_capacity * 100 -
1511 orig_src_load * dst_capacity * env->imbalance_pct;
1513 /* Would this change make things worse? */
1514 return (imb > old_imb);
1518 * This checks if the overall compute and NUMA accesses of the system would
1519 * be improved if the source tasks was migrated to the target dst_cpu taking
1520 * into account that it might be best if task running on the dst_cpu should
1521 * be exchanged with the source task
1523 static void task_numa_compare(struct task_numa_env *env,
1524 long taskimp, long groupimp)
1526 struct rq *src_rq = cpu_rq(env->src_cpu);
1527 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1528 struct task_struct *cur;
1529 long src_load, dst_load;
1531 long imp = env->p->numa_group ? groupimp : taskimp;
1533 int dist = env->dist;
1536 cur = task_rcu_dereference(&dst_rq->curr);
1537 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
1541 * Because we have preemption enabled we can get migrated around and
1542 * end try selecting ourselves (current == env->p) as a swap candidate.
1548 * "imp" is the fault differential for the source task between the
1549 * source and destination node. Calculate the total differential for
1550 * the source task and potential destination task. The more negative
1551 * the value is, the more rmeote accesses that would be expected to
1552 * be incurred if the tasks were swapped.
1555 /* Skip this swap candidate if cannot move to the source cpu */
1556 if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
1560 * If dst and source tasks are in the same NUMA group, or not
1561 * in any group then look only at task weights.
1563 if (cur->numa_group == env->p->numa_group) {
1564 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1565 task_weight(cur, env->dst_nid, dist);
1567 * Add some hysteresis to prevent swapping the
1568 * tasks within a group over tiny differences.
1570 if (cur->numa_group)
1574 * Compare the group weights. If a task is all by
1575 * itself (not part of a group), use the task weight
1578 if (cur->numa_group)
1579 imp += group_weight(cur, env->src_nid, dist) -
1580 group_weight(cur, env->dst_nid, dist);
1582 imp += task_weight(cur, env->src_nid, dist) -
1583 task_weight(cur, env->dst_nid, dist);
1587 if (imp <= env->best_imp && moveimp <= env->best_imp)
1591 /* Is there capacity at our destination? */
1592 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1593 !env->dst_stats.has_free_capacity)
1599 /* Balance doesn't matter much if we're running a task per cpu */
1600 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1601 dst_rq->nr_running == 1)
1605 * In the overloaded case, try and keep the load balanced.
1608 load = task_h_load(env->p);
1609 dst_load = env->dst_stats.load + load;
1610 src_load = env->src_stats.load - load;
1612 if (moveimp > imp && moveimp > env->best_imp) {
1614 * If the improvement from just moving env->p direction is
1615 * better than swapping tasks around, check if a move is
1616 * possible. Store a slightly smaller score than moveimp,
1617 * so an actually idle CPU will win.
1619 if (!load_too_imbalanced(src_load, dst_load, env)) {
1626 if (imp <= env->best_imp)
1630 load = task_h_load(cur);
1635 if (load_too_imbalanced(src_load, dst_load, env))
1639 * One idle CPU per node is evaluated for a task numa move.
1640 * Call select_idle_sibling to maybe find a better one.
1644 * select_idle_siblings() uses an per-cpu cpumask that
1645 * can be used from IRQ context.
1647 local_irq_disable();
1648 env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
1654 task_numa_assign(env, cur, imp);
1659 static void task_numa_find_cpu(struct task_numa_env *env,
1660 long taskimp, long groupimp)
1664 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1665 /* Skip this CPU if the source task cannot migrate */
1666 if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
1670 task_numa_compare(env, taskimp, groupimp);
1674 /* Only move tasks to a NUMA node less busy than the current node. */
1675 static bool numa_has_capacity(struct task_numa_env *env)
1677 struct numa_stats *src = &env->src_stats;
1678 struct numa_stats *dst = &env->dst_stats;
1680 if (src->has_free_capacity && !dst->has_free_capacity)
1684 * Only consider a task move if the source has a higher load
1685 * than the destination, corrected for CPU capacity on each node.
1687 * src->load dst->load
1688 * --------------------- vs ---------------------
1689 * src->compute_capacity dst->compute_capacity
1691 if (src->load * dst->compute_capacity * env->imbalance_pct >
1693 dst->load * src->compute_capacity * 100)
1699 static int task_numa_migrate(struct task_struct *p)
1701 struct task_numa_env env = {
1704 .src_cpu = task_cpu(p),
1705 .src_nid = task_node(p),
1707 .imbalance_pct = 112,
1713 struct sched_domain *sd;
1714 unsigned long taskweight, groupweight;
1716 long taskimp, groupimp;
1719 * Pick the lowest SD_NUMA domain, as that would have the smallest
1720 * imbalance and would be the first to start moving tasks about.
1722 * And we want to avoid any moving of tasks about, as that would create
1723 * random movement of tasks -- counter the numa conditions we're trying
1727 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1729 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1733 * Cpusets can break the scheduler domain tree into smaller
1734 * balance domains, some of which do not cross NUMA boundaries.
1735 * Tasks that are "trapped" in such domains cannot be migrated
1736 * elsewhere, so there is no point in (re)trying.
1738 if (unlikely(!sd)) {
1739 p->numa_preferred_nid = task_node(p);
1743 env.dst_nid = p->numa_preferred_nid;
1744 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1745 taskweight = task_weight(p, env.src_nid, dist);
1746 groupweight = group_weight(p, env.src_nid, dist);
1747 update_numa_stats(&env.src_stats, env.src_nid);
1748 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1749 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1750 update_numa_stats(&env.dst_stats, env.dst_nid);
1752 /* Try to find a spot on the preferred nid. */
1753 if (numa_has_capacity(&env))
1754 task_numa_find_cpu(&env, taskimp, groupimp);
1757 * Look at other nodes in these cases:
1758 * - there is no space available on the preferred_nid
1759 * - the task is part of a numa_group that is interleaved across
1760 * multiple NUMA nodes; in order to better consolidate the group,
1761 * we need to check other locations.
1763 if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
1764 for_each_online_node(nid) {
1765 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1768 dist = node_distance(env.src_nid, env.dst_nid);
1769 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1771 taskweight = task_weight(p, env.src_nid, dist);
1772 groupweight = group_weight(p, env.src_nid, dist);
1775 /* Only consider nodes where both task and groups benefit */
1776 taskimp = task_weight(p, nid, dist) - taskweight;
1777 groupimp = group_weight(p, nid, dist) - groupweight;
1778 if (taskimp < 0 && groupimp < 0)
1783 update_numa_stats(&env.dst_stats, env.dst_nid);
1784 if (numa_has_capacity(&env))
1785 task_numa_find_cpu(&env, taskimp, groupimp);
1790 * If the task is part of a workload that spans multiple NUMA nodes,
1791 * and is migrating into one of the workload's active nodes, remember
1792 * this node as the task's preferred numa node, so the workload can
1794 * A task that migrated to a second choice node will be better off
1795 * trying for a better one later. Do not set the preferred node here.
1797 if (p->numa_group) {
1798 struct numa_group *ng = p->numa_group;
1800 if (env.best_cpu == -1)
1805 if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
1806 sched_setnuma(p, env.dst_nid);
1809 /* No better CPU than the current one was found. */
1810 if (env.best_cpu == -1)
1814 * Reset the scan period if the task is being rescheduled on an
1815 * alternative node to recheck if the tasks is now properly placed.
1817 p->numa_scan_period = task_scan_min(p);
1819 if (env.best_task == NULL) {
1820 ret = migrate_task_to(p, env.best_cpu);
1822 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1826 ret = migrate_swap(p, env.best_task);
1828 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1829 put_task_struct(env.best_task);
1833 /* Attempt to migrate a task to a CPU on the preferred node. */
1834 static void numa_migrate_preferred(struct task_struct *p)
1836 unsigned long interval = HZ;
1838 /* This task has no NUMA fault statistics yet */
1839 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1842 /* Periodically retry migrating the task to the preferred node */
1843 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1844 p->numa_migrate_retry = jiffies + interval;
1846 /* Success if task is already running on preferred CPU */
1847 if (task_node(p) == p->numa_preferred_nid)
1850 /* Otherwise, try migrate to a CPU on the preferred node */
1851 task_numa_migrate(p);
1855 * Find out how many nodes on the workload is actively running on. Do this by
1856 * tracking the nodes from which NUMA hinting faults are triggered. This can
1857 * be different from the set of nodes where the workload's memory is currently
1860 static void numa_group_count_active_nodes(struct numa_group *numa_group)
1862 unsigned long faults, max_faults = 0;
1863 int nid, active_nodes = 0;
1865 for_each_online_node(nid) {
1866 faults = group_faults_cpu(numa_group, nid);
1867 if (faults > max_faults)
1868 max_faults = faults;
1871 for_each_online_node(nid) {
1872 faults = group_faults_cpu(numa_group, nid);
1873 if (faults * ACTIVE_NODE_FRACTION > max_faults)
1877 numa_group->max_faults_cpu = max_faults;
1878 numa_group->active_nodes = active_nodes;
1882 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1883 * increments. The more local the fault statistics are, the higher the scan
1884 * period will be for the next scan window. If local/(local+remote) ratio is
1885 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1886 * the scan period will decrease. Aim for 70% local accesses.
1888 #define NUMA_PERIOD_SLOTS 10
1889 #define NUMA_PERIOD_THRESHOLD 7
1892 * Increase the scan period (slow down scanning) if the majority of
1893 * our memory is already on our local node, or if the majority of
1894 * the page accesses are shared with other processes.
1895 * Otherwise, decrease the scan period.
1897 static void update_task_scan_period(struct task_struct *p,
1898 unsigned long shared, unsigned long private)
1900 unsigned int period_slot;
1904 unsigned long remote = p->numa_faults_locality[0];
1905 unsigned long local = p->numa_faults_locality[1];
1908 * If there were no record hinting faults then either the task is
1909 * completely idle or all activity is areas that are not of interest
1910 * to automatic numa balancing. Related to that, if there were failed
1911 * migration then it implies we are migrating too quickly or the local
1912 * node is overloaded. In either case, scan slower
1914 if (local + shared == 0 || p->numa_faults_locality[2]) {
1915 p->numa_scan_period = min(p->numa_scan_period_max,
1916 p->numa_scan_period << 1);
1918 p->mm->numa_next_scan = jiffies +
1919 msecs_to_jiffies(p->numa_scan_period);
1925 * Prepare to scale scan period relative to the current period.
1926 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1927 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1928 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1930 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1931 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1932 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1933 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1936 diff = slot * period_slot;
1938 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1941 * Scale scan rate increases based on sharing. There is an
1942 * inverse relationship between the degree of sharing and
1943 * the adjustment made to the scanning period. Broadly
1944 * speaking the intent is that there is little point
1945 * scanning faster if shared accesses dominate as it may
1946 * simply bounce migrations uselessly
1948 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1949 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1952 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1953 task_scan_min(p), task_scan_max(p));
1954 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1958 * Get the fraction of time the task has been running since the last
1959 * NUMA placement cycle. The scheduler keeps similar statistics, but
1960 * decays those on a 32ms period, which is orders of magnitude off
1961 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1962 * stats only if the task is so new there are no NUMA statistics yet.
1964 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1966 u64 runtime, delta, now;
1967 /* Use the start of this time slice to avoid calculations. */
1968 now = p->se.exec_start;
1969 runtime = p->se.sum_exec_runtime;
1971 if (p->last_task_numa_placement) {
1972 delta = runtime - p->last_sum_exec_runtime;
1973 *period = now - p->last_task_numa_placement;
1975 delta = p->se.avg.load_sum / p->se.load.weight;
1976 *period = LOAD_AVG_MAX;
1979 p->last_sum_exec_runtime = runtime;
1980 p->last_task_numa_placement = now;
1986 * Determine the preferred nid for a task in a numa_group. This needs to
1987 * be done in a way that produces consistent results with group_weight,
1988 * otherwise workloads might not converge.
1990 static int preferred_group_nid(struct task_struct *p, int nid)
1995 /* Direct connections between all NUMA nodes. */
1996 if (sched_numa_topology_type == NUMA_DIRECT)
2000 * On a system with glueless mesh NUMA topology, group_weight
2001 * scores nodes according to the number of NUMA hinting faults on
2002 * both the node itself, and on nearby nodes.
2004 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2005 unsigned long score, max_score = 0;
2006 int node, max_node = nid;
2008 dist = sched_max_numa_distance;
2010 for_each_online_node(node) {
2011 score = group_weight(p, node, dist);
2012 if (score > max_score) {
2021 * Finding the preferred nid in a system with NUMA backplane
2022 * interconnect topology is more involved. The goal is to locate
2023 * tasks from numa_groups near each other in the system, and
2024 * untangle workloads from different sides of the system. This requires
2025 * searching down the hierarchy of node groups, recursively searching
2026 * inside the highest scoring group of nodes. The nodemask tricks
2027 * keep the complexity of the search down.
2029 nodes = node_online_map;
2030 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2031 unsigned long max_faults = 0;
2032 nodemask_t max_group = NODE_MASK_NONE;
2035 /* Are there nodes at this distance from each other? */
2036 if (!find_numa_distance(dist))
2039 for_each_node_mask(a, nodes) {
2040 unsigned long faults = 0;
2041 nodemask_t this_group;
2042 nodes_clear(this_group);
2044 /* Sum group's NUMA faults; includes a==b case. */
2045 for_each_node_mask(b, nodes) {
2046 if (node_distance(a, b) < dist) {
2047 faults += group_faults(p, b);
2048 node_set(b, this_group);
2049 node_clear(b, nodes);
2053 /* Remember the top group. */
2054 if (faults > max_faults) {
2055 max_faults = faults;
2056 max_group = this_group;
2058 * subtle: at the smallest distance there is
2059 * just one node left in each "group", the
2060 * winner is the preferred nid.
2065 /* Next round, evaluate the nodes within max_group. */
2073 static void task_numa_placement(struct task_struct *p)
2075 int seq, nid, max_nid = -1, max_group_nid = -1;
2076 unsigned long max_faults = 0, max_group_faults = 0;
2077 unsigned long fault_types[2] = { 0, 0 };
2078 unsigned long total_faults;
2079 u64 runtime, period;
2080 spinlock_t *group_lock = NULL;
2083 * The p->mm->numa_scan_seq field gets updated without
2084 * exclusive access. Use READ_ONCE() here to ensure
2085 * that the field is read in a single access:
2087 seq = READ_ONCE(p->mm->numa_scan_seq);
2088 if (p->numa_scan_seq == seq)
2090 p->numa_scan_seq = seq;
2091 p->numa_scan_period_max = task_scan_max(p);
2093 total_faults = p->numa_faults_locality[0] +
2094 p->numa_faults_locality[1];
2095 runtime = numa_get_avg_runtime(p, &period);
2097 /* If the task is part of a group prevent parallel updates to group stats */
2098 if (p->numa_group) {
2099 group_lock = &p->numa_group->lock;
2100 spin_lock_irq(group_lock);
2103 /* Find the node with the highest number of faults */
2104 for_each_online_node(nid) {
2105 /* Keep track of the offsets in numa_faults array */
2106 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2107 unsigned long faults = 0, group_faults = 0;
2110 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2111 long diff, f_diff, f_weight;
2113 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2114 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2115 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2116 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2118 /* Decay existing window, copy faults since last scan */
2119 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2120 fault_types[priv] += p->numa_faults[membuf_idx];
2121 p->numa_faults[membuf_idx] = 0;
2124 * Normalize the faults_from, so all tasks in a group
2125 * count according to CPU use, instead of by the raw
2126 * number of faults. Tasks with little runtime have
2127 * little over-all impact on throughput, and thus their
2128 * faults are less important.
2130 f_weight = div64_u64(runtime << 16, period + 1);
2131 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2133 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2134 p->numa_faults[cpubuf_idx] = 0;
2136 p->numa_faults[mem_idx] += diff;
2137 p->numa_faults[cpu_idx] += f_diff;
2138 faults += p->numa_faults[mem_idx];
2139 p->total_numa_faults += diff;
2140 if (p->numa_group) {
2142 * safe because we can only change our own group
2144 * mem_idx represents the offset for a given
2145 * nid and priv in a specific region because it
2146 * is at the beginning of the numa_faults array.
2148 p->numa_group->faults[mem_idx] += diff;
2149 p->numa_group->faults_cpu[mem_idx] += f_diff;
2150 p->numa_group->total_faults += diff;
2151 group_faults += p->numa_group->faults[mem_idx];
2155 if (faults > max_faults) {
2156 max_faults = faults;
2160 if (group_faults > max_group_faults) {
2161 max_group_faults = group_faults;
2162 max_group_nid = nid;
2166 update_task_scan_period(p, fault_types[0], fault_types[1]);
2168 if (p->numa_group) {
2169 numa_group_count_active_nodes(p->numa_group);
2170 spin_unlock_irq(group_lock);
2171 max_nid = preferred_group_nid(p, max_group_nid);
2175 /* Set the new preferred node */
2176 if (max_nid != p->numa_preferred_nid)
2177 sched_setnuma(p, max_nid);
2179 if (task_node(p) != p->numa_preferred_nid)
2180 numa_migrate_preferred(p);
2184 static inline int get_numa_group(struct numa_group *grp)
2186 return atomic_inc_not_zero(&grp->refcount);
2189 static inline void put_numa_group(struct numa_group *grp)
2191 if (atomic_dec_and_test(&grp->refcount))
2192 kfree_rcu(grp, rcu);
2195 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2198 struct numa_group *grp, *my_grp;
2199 struct task_struct *tsk;
2201 int cpu = cpupid_to_cpu(cpupid);
2204 if (unlikely(!p->numa_group)) {
2205 unsigned int size = sizeof(struct numa_group) +
2206 4*nr_node_ids*sizeof(unsigned long);
2208 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2212 atomic_set(&grp->refcount, 1);
2213 grp->active_nodes = 1;
2214 grp->max_faults_cpu = 0;
2215 spin_lock_init(&grp->lock);
2217 /* Second half of the array tracks nids where faults happen */
2218 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2221 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2222 grp->faults[i] = p->numa_faults[i];
2224 grp->total_faults = p->total_numa_faults;
2227 rcu_assign_pointer(p->numa_group, grp);
2231 tsk = READ_ONCE(cpu_rq(cpu)->curr);
2233 if (!cpupid_match_pid(tsk, cpupid))
2236 grp = rcu_dereference(tsk->numa_group);
2240 my_grp = p->numa_group;
2245 * Only join the other group if its bigger; if we're the bigger group,
2246 * the other task will join us.
2248 if (my_grp->nr_tasks > grp->nr_tasks)
2252 * Tie-break on the grp address.
2254 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2257 /* Always join threads in the same process. */
2258 if (tsk->mm == current->mm)
2261 /* Simple filter to avoid false positives due to PID collisions */
2262 if (flags & TNF_SHARED)
2265 /* Update priv based on whether false sharing was detected */
2268 if (join && !get_numa_group(grp))
2276 BUG_ON(irqs_disabled());
2277 double_lock_irq(&my_grp->lock, &grp->lock);
2279 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2280 my_grp->faults[i] -= p->numa_faults[i];
2281 grp->faults[i] += p->numa_faults[i];
2283 my_grp->total_faults -= p->total_numa_faults;
2284 grp->total_faults += p->total_numa_faults;
2289 spin_unlock(&my_grp->lock);
2290 spin_unlock_irq(&grp->lock);
2292 rcu_assign_pointer(p->numa_group, grp);
2294 put_numa_group(my_grp);
2302 void task_numa_free(struct task_struct *p)
2304 struct numa_group *grp = p->numa_group;
2305 void *numa_faults = p->numa_faults;
2306 unsigned long flags;
2310 spin_lock_irqsave(&grp->lock, flags);
2311 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2312 grp->faults[i] -= p->numa_faults[i];
2313 grp->total_faults -= p->total_numa_faults;
2316 spin_unlock_irqrestore(&grp->lock, flags);
2317 RCU_INIT_POINTER(p->numa_group, NULL);
2318 put_numa_group(grp);
2321 p->numa_faults = NULL;
2326 * Got a PROT_NONE fault for a page on @node.
2328 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2330 struct task_struct *p = current;
2331 bool migrated = flags & TNF_MIGRATED;
2332 int cpu_node = task_node(current);
2333 int local = !!(flags & TNF_FAULT_LOCAL);
2334 struct numa_group *ng;
2337 if (!static_branch_likely(&sched_numa_balancing))
2340 /* for example, ksmd faulting in a user's mm */
2344 /* Allocate buffer to track faults on a per-node basis */
2345 if (unlikely(!p->numa_faults)) {
2346 int size = sizeof(*p->numa_faults) *
2347 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2349 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2350 if (!p->numa_faults)
2353 p->total_numa_faults = 0;
2354 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2358 * First accesses are treated as private, otherwise consider accesses
2359 * to be private if the accessing pid has not changed
2361 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2364 priv = cpupid_match_pid(p, last_cpupid);
2365 if (!priv && !(flags & TNF_NO_GROUP))
2366 task_numa_group(p, last_cpupid, flags, &priv);
2370 * If a workload spans multiple NUMA nodes, a shared fault that
2371 * occurs wholly within the set of nodes that the workload is
2372 * actively using should be counted as local. This allows the
2373 * scan rate to slow down when a workload has settled down.
2376 if (!priv && !local && ng && ng->active_nodes > 1 &&
2377 numa_is_active_node(cpu_node, ng) &&
2378 numa_is_active_node(mem_node, ng))
2381 task_numa_placement(p);
2384 * Retry task to preferred node migration periodically, in case it
2385 * case it previously failed, or the scheduler moved us.
2387 if (time_after(jiffies, p->numa_migrate_retry))
2388 numa_migrate_preferred(p);
2391 p->numa_pages_migrated += pages;
2392 if (flags & TNF_MIGRATE_FAIL)
2393 p->numa_faults_locality[2] += pages;
2395 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2396 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2397 p->numa_faults_locality[local] += pages;
2400 static void reset_ptenuma_scan(struct task_struct *p)
2403 * We only did a read acquisition of the mmap sem, so
2404 * p->mm->numa_scan_seq is written to without exclusive access
2405 * and the update is not guaranteed to be atomic. That's not
2406 * much of an issue though, since this is just used for
2407 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2408 * expensive, to avoid any form of compiler optimizations:
2410 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2411 p->mm->numa_scan_offset = 0;
2415 * The expensive part of numa migration is done from task_work context.
2416 * Triggered from task_tick_numa().
2418 void task_numa_work(struct callback_head *work)
2420 unsigned long migrate, next_scan, now = jiffies;
2421 struct task_struct *p = current;
2422 struct mm_struct *mm = p->mm;
2423 u64 runtime = p->se.sum_exec_runtime;
2424 struct vm_area_struct *vma;
2425 unsigned long start, end;
2426 unsigned long nr_pte_updates = 0;
2427 long pages, virtpages;
2429 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2431 work->next = work; /* protect against double add */
2433 * Who cares about NUMA placement when they're dying.
2435 * NOTE: make sure not to dereference p->mm before this check,
2436 * exit_task_work() happens _after_ exit_mm() so we could be called
2437 * without p->mm even though we still had it when we enqueued this
2440 if (p->flags & PF_EXITING)
2443 if (!mm->numa_next_scan) {
2444 mm->numa_next_scan = now +
2445 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2449 * Enforce maximal scan/migration frequency..
2451 migrate = mm->numa_next_scan;
2452 if (time_before(now, migrate))
2455 if (p->numa_scan_period == 0) {
2456 p->numa_scan_period_max = task_scan_max(p);
2457 p->numa_scan_period = task_scan_min(p);
2460 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2461 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2465 * Delay this task enough that another task of this mm will likely win
2466 * the next time around.
2468 p->node_stamp += 2 * TICK_NSEC;
2470 start = mm->numa_scan_offset;
2471 pages = sysctl_numa_balancing_scan_size;
2472 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2473 virtpages = pages * 8; /* Scan up to this much virtual space */
2478 down_read(&mm->mmap_sem);
2479 vma = find_vma(mm, start);
2481 reset_ptenuma_scan(p);
2485 for (; vma; vma = vma->vm_next) {
2486 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2487 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2492 * Shared library pages mapped by multiple processes are not
2493 * migrated as it is expected they are cache replicated. Avoid
2494 * hinting faults in read-only file-backed mappings or the vdso
2495 * as migrating the pages will be of marginal benefit.
2498 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2502 * Skip inaccessible VMAs to avoid any confusion between
2503 * PROT_NONE and NUMA hinting ptes
2505 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2509 start = max(start, vma->vm_start);
2510 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2511 end = min(end, vma->vm_end);
2512 nr_pte_updates = change_prot_numa(vma, start, end);
2515 * Try to scan sysctl_numa_balancing_size worth of
2516 * hpages that have at least one present PTE that
2517 * is not already pte-numa. If the VMA contains
2518 * areas that are unused or already full of prot_numa
2519 * PTEs, scan up to virtpages, to skip through those
2523 pages -= (end - start) >> PAGE_SHIFT;
2524 virtpages -= (end - start) >> PAGE_SHIFT;
2527 if (pages <= 0 || virtpages <= 0)
2531 } while (end != vma->vm_end);
2536 * It is possible to reach the end of the VMA list but the last few
2537 * VMAs are not guaranteed to the vma_migratable. If they are not, we
2538 * would find the !migratable VMA on the next scan but not reset the
2539 * scanner to the start so check it now.
2542 mm->numa_scan_offset = start;
2544 reset_ptenuma_scan(p);
2545 up_read(&mm->mmap_sem);
2548 * Make sure tasks use at least 32x as much time to run other code
2549 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2550 * Usually update_task_scan_period slows down scanning enough; on an
2551 * overloaded system we need to limit overhead on a per task basis.
2553 if (unlikely(p->se.sum_exec_runtime != runtime)) {
2554 u64 diff = p->se.sum_exec_runtime - runtime;
2555 p->node_stamp += 32 * diff;
2560 * Drive the periodic memory faults..
2562 void task_tick_numa(struct rq *rq, struct task_struct *curr)
2564 struct callback_head *work = &curr->numa_work;
2568 * We don't care about NUMA placement if we don't have memory.
2570 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2574 * Using runtime rather than walltime has the dual advantage that
2575 * we (mostly) drive the selection from busy threads and that the
2576 * task needs to have done some actual work before we bother with
2579 now = curr->se.sum_exec_runtime;
2580 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2582 if (now > curr->node_stamp + period) {
2583 if (!curr->node_stamp)
2584 curr->numa_scan_period = task_scan_min(curr);
2585 curr->node_stamp += period;
2587 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2588 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2589 task_work_add(curr, work, true);
2594 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2598 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2602 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2605 #endif /* CONFIG_NUMA_BALANCING */
2608 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2610 update_load_add(&cfs_rq->load, se->load.weight);
2611 if (!parent_entity(se))
2612 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2614 if (entity_is_task(se)) {
2615 struct rq *rq = rq_of(cfs_rq);
2617 account_numa_enqueue(rq, task_of(se));
2618 list_add(&se->group_node, &rq->cfs_tasks);
2621 cfs_rq->nr_running++;
2625 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2627 update_load_sub(&cfs_rq->load, se->load.weight);
2628 if (!parent_entity(se))
2629 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2631 if (entity_is_task(se)) {
2632 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2633 list_del_init(&se->group_node);
2636 cfs_rq->nr_running--;
2639 #ifdef CONFIG_FAIR_GROUP_SCHED
2641 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2643 long tg_weight, load, shares;
2646 * This really should be: cfs_rq->avg.load_avg, but instead we use
2647 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
2648 * the shares for small weight interactive tasks.
2650 load = scale_load_down(cfs_rq->load.weight);
2652 tg_weight = atomic_long_read(&tg->load_avg);
2654 /* Ensure tg_weight >= load */
2655 tg_weight -= cfs_rq->tg_load_avg_contrib;
2658 shares = (tg->shares * load);
2660 shares /= tg_weight;
2663 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
2664 * of a group with small tg->shares value. It is a floor value which is
2665 * assigned as a minimum load.weight to the sched_entity representing
2666 * the group on a CPU.
2668 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
2669 * on an 8-core system with 8 tasks each runnable on one CPU shares has
2670 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
2671 * case no task is runnable on a CPU MIN_SHARES=2 should be returned
2674 if (shares < MIN_SHARES)
2675 shares = MIN_SHARES;
2676 if (shares > tg->shares)
2677 shares = tg->shares;
2681 # else /* CONFIG_SMP */
2682 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2686 # endif /* CONFIG_SMP */
2688 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2689 unsigned long weight)
2692 /* commit outstanding execution time */
2693 if (cfs_rq->curr == se)
2694 update_curr(cfs_rq);
2695 account_entity_dequeue(cfs_rq, se);
2698 update_load_set(&se->load, weight);
2701 account_entity_enqueue(cfs_rq, se);
2704 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2706 static void update_cfs_shares(struct sched_entity *se)
2708 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2709 struct task_group *tg;
2715 if (throttled_hierarchy(cfs_rq))
2721 if (likely(se->load.weight == tg->shares))
2724 shares = calc_cfs_shares(cfs_rq, tg);
2726 reweight_entity(cfs_rq_of(se), se, shares);
2729 #else /* CONFIG_FAIR_GROUP_SCHED */
2730 static inline void update_cfs_shares(struct sched_entity *se)
2733 #endif /* CONFIG_FAIR_GROUP_SCHED */
2736 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2737 static const u32 runnable_avg_yN_inv[] = {
2738 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2739 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2740 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2741 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2742 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2743 0x85aac367, 0x82cd8698,
2747 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2748 * over-estimates when re-combining.
2750 static const u32 runnable_avg_yN_sum[] = {
2751 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2752 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2753 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2757 * Precomputed \Sum y^k { 1<=k<=n, where n%32=0). Values are rolled down to
2758 * lower integers. See Documentation/scheduler/sched-avg.txt how these
2761 static const u32 __accumulated_sum_N32[] = {
2762 0, 23371, 35056, 40899, 43820, 45281,
2763 46011, 46376, 46559, 46650, 46696, 46719,
2768 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2770 static u64 decay_load(u64 val, u64 n)
2772 unsigned int local_n;
2776 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2779 /* after bounds checking we can collapse to 32-bit */
2783 * As y^PERIOD = 1/2, we can combine
2784 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2785 * With a look-up table which covers y^n (n<PERIOD)
2787 * To achieve constant time decay_load.
2789 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2790 val >>= local_n / LOAD_AVG_PERIOD;
2791 local_n %= LOAD_AVG_PERIOD;
2794 val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2798 static u32 __accumulate_sum(u64 periods, u32 period_contrib, u32 remainder)
2800 u32 c1, c2, c3 = remainder; /* y^0 == 1 */
2803 return remainder - period_contrib;
2805 if (unlikely(periods >= LOAD_AVG_MAX_N))
2806 return LOAD_AVG_MAX;
2811 c1 = decay_load((u64)(1024 - period_contrib), periods);
2815 * For updates fully spanning n periods, the contribution to runnable
2818 * c2 = 1024 \Sum y^n
2820 * We can compute this reasonably efficiently by combining:
2822 * y^PERIOD = 1/2 with precomputed 1024 \Sum y^n {for: n < PERIOD}
2824 if (likely(periods <= LOAD_AVG_PERIOD)) {
2825 c2 = runnable_avg_yN_sum[periods];
2827 c2 = __accumulated_sum_N32[periods/LOAD_AVG_PERIOD];
2828 periods %= LOAD_AVG_PERIOD;
2829 c2 = decay_load(c2, periods);
2830 c2 += runnable_avg_yN_sum[periods];
2833 return c1 + c2 + c3;
2836 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
2839 * Accumulate the three separate parts of the sum; d1 the remainder
2840 * of the last (incomplete) period, d2 the span of full periods and d3
2841 * the remainder of the (incomplete) current period.
2846 * |<->|<----------------->|<--->|
2847 * ... |---x---|------| ... |------|-----x (now)
2850 * u' = (u + d1) y^(p+1) + 1024 \Sum y^n + d3 y^0
2853 * = u y^(p+1) + (Step 1)
2856 * d1 y^(p+1) + 1024 \Sum y^n + d3 y^0 (Step 2)
2859 static __always_inline u32
2860 accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
2861 unsigned long weight, int running, struct cfs_rq *cfs_rq)
2863 unsigned long scale_freq, scale_cpu;
2867 scale_freq = arch_scale_freq_capacity(NULL, cpu);
2868 scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2870 delta += sa->period_contrib;
2871 periods = delta / 1024; /* A period is 1024us (~1ms) */
2874 * Step 1: decay old *_sum if we crossed period boundaries.
2877 sa->load_sum = decay_load(sa->load_sum, periods);
2879 cfs_rq->runnable_load_sum =
2880 decay_load(cfs_rq->runnable_load_sum, periods);
2882 sa->util_sum = decay_load((u64)(sa->util_sum), periods);
2889 contrib = __accumulate_sum(periods, sa->period_contrib, delta);
2890 sa->period_contrib = delta;
2892 contrib = cap_scale(contrib, scale_freq);
2894 sa->load_sum += weight * contrib;
2896 cfs_rq->runnable_load_sum += weight * contrib;
2899 sa->util_sum += contrib * scale_cpu;
2905 * We can represent the historical contribution to runnable average as the
2906 * coefficients of a geometric series. To do this we sub-divide our runnable
2907 * history into segments of approximately 1ms (1024us); label the segment that
2908 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2910 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2912 * (now) (~1ms ago) (~2ms ago)
2914 * Let u_i denote the fraction of p_i that the entity was runnable.
2916 * We then designate the fractions u_i as our co-efficients, yielding the
2917 * following representation of historical load:
2918 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2920 * We choose y based on the with of a reasonably scheduling period, fixing:
2923 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2924 * approximately half as much as the contribution to load within the last ms
2927 * When a period "rolls over" and we have new u_0`, multiplying the previous
2928 * sum again by y is sufficient to update:
2929 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2930 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2932 static __always_inline int
2933 ___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2934 unsigned long weight, int running, struct cfs_rq *cfs_rq)
2938 delta = now - sa->last_update_time;
2940 * This should only happen when time goes backwards, which it
2941 * unfortunately does during sched clock init when we swap over to TSC.
2943 if ((s64)delta < 0) {
2944 sa->last_update_time = now;
2949 * Use 1024ns as the unit of measurement since it's a reasonable
2950 * approximation of 1us and fast to compute.
2955 sa->last_update_time = now;
2958 * Now we know we crossed measurement unit boundaries. The *_avg
2959 * accrues by two steps:
2961 * Step 1: accumulate *_sum since last_update_time. If we haven't
2962 * crossed period boundaries, finish.
2964 if (!accumulate_sum(delta, cpu, sa, weight, running, cfs_rq))
2968 * Step 2: update *_avg.
2970 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
2972 cfs_rq->runnable_load_avg =
2973 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2975 sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
2981 __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
2983 return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL);
2987 __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
2989 return ___update_load_avg(now, cpu, &se->avg,
2990 se->on_rq * scale_load_down(se->load.weight),
2991 cfs_rq->curr == se, NULL);
2995 __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
2997 return ___update_load_avg(now, cpu, &cfs_rq->avg,
2998 scale_load_down(cfs_rq->load.weight),
2999 cfs_rq->curr != NULL, cfs_rq);
3003 * Signed add and clamp on underflow.
3005 * Explicitly do a load-store to ensure the intermediate value never hits
3006 * memory. This allows lockless observations without ever seeing the negative
3009 #define add_positive(_ptr, _val) do { \
3010 typeof(_ptr) ptr = (_ptr); \
3011 typeof(_val) val = (_val); \
3012 typeof(*ptr) res, var = READ_ONCE(*ptr); \
3016 if (val < 0 && res > var) \
3019 WRITE_ONCE(*ptr, res); \
3022 #ifdef CONFIG_FAIR_GROUP_SCHED
3024 * update_tg_load_avg - update the tg's load avg
3025 * @cfs_rq: the cfs_rq whose avg changed
3026 * @force: update regardless of how small the difference
3028 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3029 * However, because tg->load_avg is a global value there are performance
3032 * In order to avoid having to look at the other cfs_rq's, we use a
3033 * differential update where we store the last value we propagated. This in
3034 * turn allows skipping updates if the differential is 'small'.
3036 * Updating tg's load_avg is necessary before update_cfs_share() (which is
3037 * done) and effective_load() (which is not done because it is too costly).
3039 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
3041 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
3044 * No need to update load_avg for root_task_group as it is not used.
3046 if (cfs_rq->tg == &root_task_group)
3049 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
3050 atomic_long_add(delta, &cfs_rq->tg->load_avg);
3051 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
3056 * Called within set_task_rq() right before setting a task's cpu. The
3057 * caller only guarantees p->pi_lock is held; no other assumptions,
3058 * including the state of rq->lock, should be made.
3060 void set_task_rq_fair(struct sched_entity *se,
3061 struct cfs_rq *prev, struct cfs_rq *next)
3063 u64 p_last_update_time;
3064 u64 n_last_update_time;
3066 if (!sched_feat(ATTACH_AGE_LOAD))
3070 * We are supposed to update the task to "current" time, then its up to
3071 * date and ready to go to new CPU/cfs_rq. But we have difficulty in
3072 * getting what current time is, so simply throw away the out-of-date
3073 * time. This will result in the wakee task is less decayed, but giving
3074 * the wakee more load sounds not bad.
3076 if (!(se->avg.last_update_time && prev))
3079 #ifndef CONFIG_64BIT
3081 u64 p_last_update_time_copy;
3082 u64 n_last_update_time_copy;
3085 p_last_update_time_copy = prev->load_last_update_time_copy;
3086 n_last_update_time_copy = next->load_last_update_time_copy;
3090 p_last_update_time = prev->avg.last_update_time;
3091 n_last_update_time = next->avg.last_update_time;
3093 } while (p_last_update_time != p_last_update_time_copy ||
3094 n_last_update_time != n_last_update_time_copy);
3097 p_last_update_time = prev->avg.last_update_time;
3098 n_last_update_time = next->avg.last_update_time;
3100 __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se);
3101 se->avg.last_update_time = n_last_update_time;
3104 /* Take into account change of utilization of a child task group */
3106 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se)
3108 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3109 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
3111 /* Nothing to update */
3115 /* Set new sched_entity's utilization */
3116 se->avg.util_avg = gcfs_rq->avg.util_avg;
3117 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
3119 /* Update parent cfs_rq utilization */
3120 add_positive(&cfs_rq->avg.util_avg, delta);
3121 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
3124 /* Take into account change of load of a child task group */
3126 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se)
3128 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3129 long delta, load = gcfs_rq->avg.load_avg;
3132 * If the load of group cfs_rq is null, the load of the
3133 * sched_entity will also be null so we can skip the formula
3138 /* Get tg's load and ensure tg_load > 0 */
3139 tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1;
3141 /* Ensure tg_load >= load and updated with current load*/
3142 tg_load -= gcfs_rq->tg_load_avg_contrib;
3146 * We need to compute a correction term in the case that the
3147 * task group is consuming more CPU than a task of equal
3148 * weight. A task with a weight equals to tg->shares will have
3149 * a load less or equal to scale_load_down(tg->shares).
3150 * Similarly, the sched_entities that represent the task group
3151 * at parent level, can't have a load higher than
3152 * scale_load_down(tg->shares). And the Sum of sched_entities'
3153 * load must be <= scale_load_down(tg->shares).
3155 if (tg_load > scale_load_down(gcfs_rq->tg->shares)) {
3156 /* scale gcfs_rq's load into tg's shares*/
3157 load *= scale_load_down(gcfs_rq->tg->shares);
3162 delta = load - se->avg.load_avg;
3164 /* Nothing to update */
3168 /* Set new sched_entity's load */
3169 se->avg.load_avg = load;
3170 se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX;
3172 /* Update parent cfs_rq load */
3173 add_positive(&cfs_rq->avg.load_avg, delta);
3174 cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX;
3177 * If the sched_entity is already enqueued, we also have to update the
3178 * runnable load avg.
3181 /* Update parent cfs_rq runnable_load_avg */
3182 add_positive(&cfs_rq->runnable_load_avg, delta);
3183 cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX;
3187 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq)
3189 cfs_rq->propagate_avg = 1;
3192 static inline int test_and_clear_tg_cfs_propagate(struct sched_entity *se)
3194 struct cfs_rq *cfs_rq = group_cfs_rq(se);
3196 if (!cfs_rq->propagate_avg)
3199 cfs_rq->propagate_avg = 0;
3203 /* Update task and its cfs_rq load average */
3204 static inline int propagate_entity_load_avg(struct sched_entity *se)
3206 struct cfs_rq *cfs_rq;
3208 if (entity_is_task(se))
3211 if (!test_and_clear_tg_cfs_propagate(se))
3214 cfs_rq = cfs_rq_of(se);
3216 set_tg_cfs_propagate(cfs_rq);
3218 update_tg_cfs_util(cfs_rq, se);
3219 update_tg_cfs_load(cfs_rq, se);
3225 * Check if we need to update the load and the utilization of a blocked
3228 static inline bool skip_blocked_update(struct sched_entity *se)
3230 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3233 * If sched_entity still have not zero load or utilization, we have to
3236 if (se->avg.load_avg || se->avg.util_avg)
3240 * If there is a pending propagation, we have to update the load and
3241 * the utilization of the sched_entity:
3243 if (gcfs_rq->propagate_avg)
3247 * Otherwise, the load and the utilization of the sched_entity is
3248 * already zero and there is no pending propagation, so it will be a
3249 * waste of time to try to decay it:
3254 #else /* CONFIG_FAIR_GROUP_SCHED */
3256 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
3258 static inline int propagate_entity_load_avg(struct sched_entity *se)
3263 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
3265 #endif /* CONFIG_FAIR_GROUP_SCHED */
3267 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
3269 if (&this_rq()->cfs == cfs_rq) {
3271 * There are a few boundary cases this might miss but it should
3272 * get called often enough that that should (hopefully) not be
3273 * a real problem -- added to that it only calls on the local
3274 * CPU, so if we enqueue remotely we'll miss an update, but
3275 * the next tick/schedule should update.
3277 * It will not get called when we go idle, because the idle
3278 * thread is a different class (!fair), nor will the utilization
3279 * number include things like RT tasks.
3281 * As is, the util number is not freq-invariant (we'd have to
3282 * implement arch_scale_freq_capacity() for that).
3286 cpufreq_update_util(rq_of(cfs_rq), 0);
3291 * Unsigned subtract and clamp on underflow.
3293 * Explicitly do a load-store to ensure the intermediate value never hits
3294 * memory. This allows lockless observations without ever seeing the negative
3297 #define sub_positive(_ptr, _val) do { \
3298 typeof(_ptr) ptr = (_ptr); \
3299 typeof(*ptr) val = (_val); \
3300 typeof(*ptr) res, var = READ_ONCE(*ptr); \
3304 WRITE_ONCE(*ptr, res); \
3308 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3309 * @now: current time, as per cfs_rq_clock_task()
3310 * @cfs_rq: cfs_rq to update
3311 * @update_freq: should we call cfs_rq_util_change() or will the call do so
3313 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3314 * avg. The immediate corollary is that all (fair) tasks must be attached, see
3315 * post_init_entity_util_avg().
3317 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
3319 * Returns true if the load decayed or we removed load.
3321 * Since both these conditions indicate a changed cfs_rq->avg.load we should
3322 * call update_tg_load_avg() when this function returns true.
3325 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3327 struct sched_avg *sa = &cfs_rq->avg;
3328 int decayed, removed_load = 0, removed_util = 0;
3330 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
3331 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
3332 sub_positive(&sa->load_avg, r);
3333 sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
3335 set_tg_cfs_propagate(cfs_rq);
3338 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
3339 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
3340 sub_positive(&sa->util_avg, r);
3341 sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
3343 set_tg_cfs_propagate(cfs_rq);
3346 decayed = __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);
3348 #ifndef CONFIG_64BIT
3350 cfs_rq->load_last_update_time_copy = sa->last_update_time;
3353 if (update_freq && (decayed || removed_util))
3354 cfs_rq_util_change(cfs_rq);
3356 return decayed || removed_load;
3360 * Optional action to be done while updating the load average
3362 #define UPDATE_TG 0x1
3363 #define SKIP_AGE_LOAD 0x2
3365 /* Update task and its cfs_rq load average */
3366 static inline void update_load_avg(struct sched_entity *se, int flags)
3368 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3369 u64 now = cfs_rq_clock_task(cfs_rq);
3370 struct rq *rq = rq_of(cfs_rq);
3371 int cpu = cpu_of(rq);
3375 * Track task load average for carrying it to new CPU after migrated, and
3376 * track group sched_entity load average for task_h_load calc in migration
3378 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
3379 __update_load_avg_se(now, cpu, cfs_rq, se);
3381 decayed = update_cfs_rq_load_avg(now, cfs_rq, true);
3382 decayed |= propagate_entity_load_avg(se);
3384 if (decayed && (flags & UPDATE_TG))
3385 update_tg_load_avg(cfs_rq, 0);
3389 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3390 * @cfs_rq: cfs_rq to attach to
3391 * @se: sched_entity to attach
3393 * Must call update_cfs_rq_load_avg() before this, since we rely on
3394 * cfs_rq->avg.last_update_time being current.
3396 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3398 se->avg.last_update_time = cfs_rq->avg.last_update_time;
3399 cfs_rq->avg.load_avg += se->avg.load_avg;
3400 cfs_rq->avg.load_sum += se->avg.load_sum;
3401 cfs_rq->avg.util_avg += se->avg.util_avg;
3402 cfs_rq->avg.util_sum += se->avg.util_sum;
3403 set_tg_cfs_propagate(cfs_rq);
3405 cfs_rq_util_change(cfs_rq);
3409 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3410 * @cfs_rq: cfs_rq to detach from
3411 * @se: sched_entity to detach
3413 * Must call update_cfs_rq_load_avg() before this, since we rely on
3414 * cfs_rq->avg.last_update_time being current.
3416 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3419 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3420 sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
3421 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3422 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3423 set_tg_cfs_propagate(cfs_rq);
3425 cfs_rq_util_change(cfs_rq);
3428 /* Add the load generated by se into cfs_rq's load average */
3430 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3432 struct sched_avg *sa = &se->avg;
3434 cfs_rq->runnable_load_avg += sa->load_avg;
3435 cfs_rq->runnable_load_sum += sa->load_sum;
3437 if (!sa->last_update_time) {
3438 attach_entity_load_avg(cfs_rq, se);
3439 update_tg_load_avg(cfs_rq, 0);
3443 /* Remove the runnable load generated by se from cfs_rq's runnable load average */
3445 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3447 cfs_rq->runnable_load_avg =
3448 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
3449 cfs_rq->runnable_load_sum =
3450 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
3453 #ifndef CONFIG_64BIT
3454 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3456 u64 last_update_time_copy;
3457 u64 last_update_time;
3460 last_update_time_copy = cfs_rq->load_last_update_time_copy;
3462 last_update_time = cfs_rq->avg.last_update_time;
3463 } while (last_update_time != last_update_time_copy);
3465 return last_update_time;
3468 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3470 return cfs_rq->avg.last_update_time;
3475 * Synchronize entity load avg of dequeued entity without locking
3478 void sync_entity_load_avg(struct sched_entity *se)
3480 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3481 u64 last_update_time;
3483 last_update_time = cfs_rq_last_update_time(cfs_rq);
3484 __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se);
3488 * Task first catches up with cfs_rq, and then subtract
3489 * itself from the cfs_rq (task must be off the queue now).
3491 void remove_entity_load_avg(struct sched_entity *se)
3493 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3496 * tasks cannot exit without having gone through wake_up_new_task() ->
3497 * post_init_entity_util_avg() which will have added things to the
3498 * cfs_rq, so we can remove unconditionally.
3500 * Similarly for groups, they will have passed through
3501 * post_init_entity_util_avg() before unregister_sched_fair_group()
3505 sync_entity_load_avg(se);
3506 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
3507 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
3510 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3512 return cfs_rq->runnable_load_avg;
3515 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3517 return cfs_rq->avg.load_avg;
3520 static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
3522 #else /* CONFIG_SMP */
3525 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3530 #define UPDATE_TG 0x0
3531 #define SKIP_AGE_LOAD 0x0
3533 static inline void update_load_avg(struct sched_entity *se, int not_used1)
3535 cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
3539 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3541 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3542 static inline void remove_entity_load_avg(struct sched_entity *se) {}
3545 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3547 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3549 static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
3554 #endif /* CONFIG_SMP */
3556 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3558 #ifdef CONFIG_SCHED_DEBUG
3559 s64 d = se->vruntime - cfs_rq->min_vruntime;
3564 if (d > 3*sysctl_sched_latency)
3565 schedstat_inc(cfs_rq->nr_spread_over);
3570 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3572 u64 vruntime = cfs_rq->min_vruntime;
3575 * The 'current' period is already promised to the current tasks,
3576 * however the extra weight of the new task will slow them down a
3577 * little, place the new task so that it fits in the slot that
3578 * stays open at the end.
3580 if (initial && sched_feat(START_DEBIT))
3581 vruntime += sched_vslice(cfs_rq, se);
3583 /* sleeps up to a single latency don't count. */
3585 unsigned long thresh = sysctl_sched_latency;
3588 * Halve their sleep time's effect, to allow
3589 * for a gentler effect of sleepers:
3591 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3597 /* ensure we never gain time by being placed backwards. */
3598 se->vruntime = max_vruntime(se->vruntime, vruntime);
3601 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3603 static inline void check_schedstat_required(void)
3605 #ifdef CONFIG_SCHEDSTATS
3606 if (schedstat_enabled())
3609 /* Force schedstat enabled if a dependent tracepoint is active */
3610 if (trace_sched_stat_wait_enabled() ||
3611 trace_sched_stat_sleep_enabled() ||
3612 trace_sched_stat_iowait_enabled() ||
3613 trace_sched_stat_blocked_enabled() ||
3614 trace_sched_stat_runtime_enabled()) {
3615 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3616 "stat_blocked and stat_runtime require the "
3617 "kernel parameter schedstats=enabled or "
3618 "kernel.sched_schedstats=1\n");
3629 * update_min_vruntime()
3630 * vruntime -= min_vruntime
3634 * update_min_vruntime()
3635 * vruntime += min_vruntime
3637 * this way the vruntime transition between RQs is done when both
3638 * min_vruntime are up-to-date.
3642 * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
3643 * vruntime -= min_vruntime
3647 * update_min_vruntime()
3648 * vruntime += min_vruntime
3650 * this way we don't have the most up-to-date min_vruntime on the originating
3651 * CPU and an up-to-date min_vruntime on the destination CPU.
3655 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3657 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
3658 bool curr = cfs_rq->curr == se;
3661 * If we're the current task, we must renormalise before calling
3665 se->vruntime += cfs_rq->min_vruntime;
3667 update_curr(cfs_rq);
3670 * Otherwise, renormalise after, such that we're placed at the current
3671 * moment in time, instead of some random moment in the past. Being
3672 * placed in the past could significantly boost this task to the
3673 * fairness detriment of existing tasks.
3675 if (renorm && !curr)
3676 se->vruntime += cfs_rq->min_vruntime;
3679 * When enqueuing a sched_entity, we must:
3680 * - Update loads to have both entity and cfs_rq synced with now.
3681 * - Add its load to cfs_rq->runnable_avg
3682 * - For group_entity, update its weight to reflect the new share of
3684 * - Add its new weight to cfs_rq->load.weight
3686 update_load_avg(se, UPDATE_TG);
3687 enqueue_entity_load_avg(cfs_rq, se);
3688 update_cfs_shares(se);
3689 account_entity_enqueue(cfs_rq, se);
3691 if (flags & ENQUEUE_WAKEUP)
3692 place_entity(cfs_rq, se, 0);
3694 check_schedstat_required();
3695 update_stats_enqueue(cfs_rq, se, flags);
3696 check_spread(cfs_rq, se);
3698 __enqueue_entity(cfs_rq, se);
3701 if (cfs_rq->nr_running == 1) {
3702 list_add_leaf_cfs_rq(cfs_rq);
3703 check_enqueue_throttle(cfs_rq);
3707 static void __clear_buddies_last(struct sched_entity *se)
3709 for_each_sched_entity(se) {
3710 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3711 if (cfs_rq->last != se)
3714 cfs_rq->last = NULL;
3718 static void __clear_buddies_next(struct sched_entity *se)
3720 for_each_sched_entity(se) {
3721 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3722 if (cfs_rq->next != se)
3725 cfs_rq->next = NULL;
3729 static void __clear_buddies_skip(struct sched_entity *se)
3731 for_each_sched_entity(se) {
3732 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3733 if (cfs_rq->skip != se)
3736 cfs_rq->skip = NULL;
3740 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3742 if (cfs_rq->last == se)
3743 __clear_buddies_last(se);
3745 if (cfs_rq->next == se)
3746 __clear_buddies_next(se);
3748 if (cfs_rq->skip == se)
3749 __clear_buddies_skip(se);
3752 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3755 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3758 * Update run-time statistics of the 'current'.
3760 update_curr(cfs_rq);
3763 * When dequeuing a sched_entity, we must:
3764 * - Update loads to have both entity and cfs_rq synced with now.
3765 * - Substract its load from the cfs_rq->runnable_avg.
3766 * - Substract its previous weight from cfs_rq->load.weight.
3767 * - For group entity, update its weight to reflect the new share
3768 * of its group cfs_rq.
3770 update_load_avg(se, UPDATE_TG);
3771 dequeue_entity_load_avg(cfs_rq, se);
3773 update_stats_dequeue(cfs_rq, se, flags);
3775 clear_buddies(cfs_rq, se);
3777 if (se != cfs_rq->curr)
3778 __dequeue_entity(cfs_rq, se);
3780 account_entity_dequeue(cfs_rq, se);
3783 * Normalize after update_curr(); which will also have moved
3784 * min_vruntime if @se is the one holding it back. But before doing
3785 * update_min_vruntime() again, which will discount @se's position and
3786 * can move min_vruntime forward still more.
3788 if (!(flags & DEQUEUE_SLEEP))
3789 se->vruntime -= cfs_rq->min_vruntime;
3791 /* return excess runtime on last dequeue */
3792 return_cfs_rq_runtime(cfs_rq);
3794 update_cfs_shares(se);
3797 * Now advance min_vruntime if @se was the entity holding it back,
3798 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
3799 * put back on, and if we advance min_vruntime, we'll be placed back
3800 * further than we started -- ie. we'll be penalized.
3802 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
3803 update_min_vruntime(cfs_rq);
3807 * Preempt the current task with a newly woken task if needed:
3810 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3812 unsigned long ideal_runtime, delta_exec;
3813 struct sched_entity *se;
3816 ideal_runtime = sched_slice(cfs_rq, curr);
3817 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3818 if (delta_exec > ideal_runtime) {
3819 resched_curr(rq_of(cfs_rq));
3821 * The current task ran long enough, ensure it doesn't get
3822 * re-elected due to buddy favours.
3824 clear_buddies(cfs_rq, curr);
3829 * Ensure that a task that missed wakeup preemption by a
3830 * narrow margin doesn't have to wait for a full slice.
3831 * This also mitigates buddy induced latencies under load.
3833 if (delta_exec < sysctl_sched_min_granularity)
3836 se = __pick_first_entity(cfs_rq);
3837 delta = curr->vruntime - se->vruntime;
3842 if (delta > ideal_runtime)
3843 resched_curr(rq_of(cfs_rq));
3847 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3849 /* 'current' is not kept within the tree. */
3852 * Any task has to be enqueued before it get to execute on
3853 * a CPU. So account for the time it spent waiting on the
3856 update_stats_wait_end(cfs_rq, se);
3857 __dequeue_entity(cfs_rq, se);
3858 update_load_avg(se, UPDATE_TG);
3861 update_stats_curr_start(cfs_rq, se);
3865 * Track our maximum slice length, if the CPU's load is at
3866 * least twice that of our own weight (i.e. dont track it
3867 * when there are only lesser-weight tasks around):
3869 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3870 schedstat_set(se->statistics.slice_max,
3871 max((u64)schedstat_val(se->statistics.slice_max),
3872 se->sum_exec_runtime - se->prev_sum_exec_runtime));
3875 se->prev_sum_exec_runtime = se->sum_exec_runtime;
3879 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3882 * Pick the next process, keeping these things in mind, in this order:
3883 * 1) keep things fair between processes/task groups
3884 * 2) pick the "next" process, since someone really wants that to run
3885 * 3) pick the "last" process, for cache locality
3886 * 4) do not run the "skip" process, if something else is available
3888 static struct sched_entity *
3889 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3891 struct sched_entity *left = __pick_first_entity(cfs_rq);
3892 struct sched_entity *se;
3895 * If curr is set we have to see if its left of the leftmost entity
3896 * still in the tree, provided there was anything in the tree at all.
3898 if (!left || (curr && entity_before(curr, left)))
3901 se = left; /* ideally we run the leftmost entity */
3904 * Avoid running the skip buddy, if running something else can
3905 * be done without getting too unfair.
3907 if (cfs_rq->skip == se) {
3908 struct sched_entity *second;
3911 second = __pick_first_entity(cfs_rq);
3913 second = __pick_next_entity(se);
3914 if (!second || (curr && entity_before(curr, second)))
3918 if (second && wakeup_preempt_entity(second, left) < 1)
3923 * Prefer last buddy, try to return the CPU to a preempted task.
3925 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3929 * Someone really wants this to run. If it's not unfair, run it.
3931 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3934 clear_buddies(cfs_rq, se);
3939 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3941 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3944 * If still on the runqueue then deactivate_task()
3945 * was not called and update_curr() has to be done:
3948 update_curr(cfs_rq);
3950 /* throttle cfs_rqs exceeding runtime */
3951 check_cfs_rq_runtime(cfs_rq);
3953 check_spread(cfs_rq, prev);
3956 update_stats_wait_start(cfs_rq, prev);
3957 /* Put 'current' back into the tree. */
3958 __enqueue_entity(cfs_rq, prev);
3959 /* in !on_rq case, update occurred at dequeue */
3960 update_load_avg(prev, 0);
3962 cfs_rq->curr = NULL;
3966 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3969 * Update run-time statistics of the 'current'.
3971 update_curr(cfs_rq);
3974 * Ensure that runnable average is periodically updated.
3976 update_load_avg(curr, UPDATE_TG);
3977 update_cfs_shares(curr);
3979 #ifdef CONFIG_SCHED_HRTICK
3981 * queued ticks are scheduled to match the slice, so don't bother
3982 * validating it and just reschedule.
3985 resched_curr(rq_of(cfs_rq));
3989 * don't let the period tick interfere with the hrtick preemption
3991 if (!sched_feat(DOUBLE_TICK) &&
3992 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3996 if (cfs_rq->nr_running > 1)
3997 check_preempt_tick(cfs_rq, curr);
4001 /**************************************************
4002 * CFS bandwidth control machinery
4005 #ifdef CONFIG_CFS_BANDWIDTH
4007 #ifdef HAVE_JUMP_LABEL
4008 static struct static_key __cfs_bandwidth_used;
4010 static inline bool cfs_bandwidth_used(void)
4012 return static_key_false(&__cfs_bandwidth_used);
4015 void cfs_bandwidth_usage_inc(void)
4017 static_key_slow_inc(&__cfs_bandwidth_used);
4020 void cfs_bandwidth_usage_dec(void)
4022 static_key_slow_dec(&__cfs_bandwidth_used);
4024 #else /* HAVE_JUMP_LABEL */
4025 static bool cfs_bandwidth_used(void)
4030 void cfs_bandwidth_usage_inc(void) {}
4031 void cfs_bandwidth_usage_dec(void) {}
4032 #endif /* HAVE_JUMP_LABEL */
4035 * default period for cfs group bandwidth.
4036 * default: 0.1s, units: nanoseconds
4038 static inline u64 default_cfs_period(void)
4040 return 100000000ULL;
4043 static inline u64 sched_cfs_bandwidth_slice(void)
4045 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
4049 * Replenish runtime according to assigned quota and update expiration time.
4050 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
4051 * additional synchronization around rq->lock.
4053 * requires cfs_b->lock
4055 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
4059 if (cfs_b->quota == RUNTIME_INF)
4062 now = sched_clock_cpu(smp_processor_id());
4063 cfs_b->runtime = cfs_b->quota;
4064 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
4067 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4069 return &tg->cfs_bandwidth;
4072 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
4073 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4075 if (unlikely(cfs_rq->throttle_count))
4076 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
4078 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
4081 /* returns 0 on failure to allocate runtime */
4082 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4084 struct task_group *tg = cfs_rq->tg;
4085 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
4086 u64 amount = 0, min_amount, expires;
4088 /* note: this is a positive sum as runtime_remaining <= 0 */
4089 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
4091 raw_spin_lock(&cfs_b->lock);
4092 if (cfs_b->quota == RUNTIME_INF)
4093 amount = min_amount;
4095 start_cfs_bandwidth(cfs_b);
4097 if (cfs_b->runtime > 0) {
4098 amount = min(cfs_b->runtime, min_amount);
4099 cfs_b->runtime -= amount;
4103 expires = cfs_b->runtime_expires;
4104 raw_spin_unlock(&cfs_b->lock);
4106 cfs_rq->runtime_remaining += amount;
4108 * we may have advanced our local expiration to account for allowed
4109 * spread between our sched_clock and the one on which runtime was
4112 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
4113 cfs_rq->runtime_expires = expires;
4115 return cfs_rq->runtime_remaining > 0;
4119 * Note: This depends on the synchronization provided by sched_clock and the
4120 * fact that rq->clock snapshots this value.
4122 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4124 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4126 /* if the deadline is ahead of our clock, nothing to do */
4127 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
4130 if (cfs_rq->runtime_remaining < 0)
4134 * If the local deadline has passed we have to consider the
4135 * possibility that our sched_clock is 'fast' and the global deadline
4136 * has not truly expired.
4138 * Fortunately we can check determine whether this the case by checking
4139 * whether the global deadline has advanced. It is valid to compare
4140 * cfs_b->runtime_expires without any locks since we only care about
4141 * exact equality, so a partial write will still work.
4144 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
4145 /* extend local deadline, drift is bounded above by 2 ticks */
4146 cfs_rq->runtime_expires += TICK_NSEC;
4148 /* global deadline is ahead, expiration has passed */
4149 cfs_rq->runtime_remaining = 0;
4153 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4155 /* dock delta_exec before expiring quota (as it could span periods) */
4156 cfs_rq->runtime_remaining -= delta_exec;
4157 expire_cfs_rq_runtime(cfs_rq);
4159 if (likely(cfs_rq->runtime_remaining > 0))
4163 * if we're unable to extend our runtime we resched so that the active
4164 * hierarchy can be throttled
4166 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
4167 resched_curr(rq_of(cfs_rq));
4170 static __always_inline
4171 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4173 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
4176 __account_cfs_rq_runtime(cfs_rq, delta_exec);
4179 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4181 return cfs_bandwidth_used() && cfs_rq->throttled;
4184 /* check whether cfs_rq, or any parent, is throttled */
4185 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4187 return cfs_bandwidth_used() && cfs_rq->throttle_count;
4191 * Ensure that neither of the group entities corresponding to src_cpu or
4192 * dest_cpu are members of a throttled hierarchy when performing group
4193 * load-balance operations.
4195 static inline int throttled_lb_pair(struct task_group *tg,
4196 int src_cpu, int dest_cpu)
4198 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
4200 src_cfs_rq = tg->cfs_rq[src_cpu];
4201 dest_cfs_rq = tg->cfs_rq[dest_cpu];
4203 return throttled_hierarchy(src_cfs_rq) ||
4204 throttled_hierarchy(dest_cfs_rq);
4207 /* updated child weight may affect parent so we have to do this bottom up */
4208 static int tg_unthrottle_up(struct task_group *tg, void *data)
4210 struct rq *rq = data;
4211 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4213 cfs_rq->throttle_count--;
4214 if (!cfs_rq->throttle_count) {
4215 /* adjust cfs_rq_clock_task() */
4216 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
4217 cfs_rq->throttled_clock_task;
4223 static int tg_throttle_down(struct task_group *tg, void *data)
4225 struct rq *rq = data;
4226 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4228 /* group is entering throttled state, stop time */
4229 if (!cfs_rq->throttle_count)
4230 cfs_rq->throttled_clock_task = rq_clock_task(rq);
4231 cfs_rq->throttle_count++;
4236 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
4238 struct rq *rq = rq_of(cfs_rq);
4239 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4240 struct sched_entity *se;
4241 long task_delta, dequeue = 1;
4244 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
4246 /* freeze hierarchy runnable averages while throttled */
4248 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
4251 task_delta = cfs_rq->h_nr_running;
4252 for_each_sched_entity(se) {
4253 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
4254 /* throttled entity or throttle-on-deactivate */
4259 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
4260 qcfs_rq->h_nr_running -= task_delta;
4262 if (qcfs_rq->load.weight)
4267 sub_nr_running(rq, task_delta);
4269 cfs_rq->throttled = 1;
4270 cfs_rq->throttled_clock = rq_clock(rq);
4271 raw_spin_lock(&cfs_b->lock);
4272 empty = list_empty(&cfs_b->throttled_cfs_rq);
4275 * Add to the _head_ of the list, so that an already-started
4276 * distribute_cfs_runtime will not see us
4278 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
4281 * If we're the first throttled task, make sure the bandwidth
4285 start_cfs_bandwidth(cfs_b);
4287 raw_spin_unlock(&cfs_b->lock);
4290 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
4292 struct rq *rq = rq_of(cfs_rq);
4293 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4294 struct sched_entity *se;
4298 se = cfs_rq->tg->se[cpu_of(rq)];
4300 cfs_rq->throttled = 0;
4302 update_rq_clock(rq);
4304 raw_spin_lock(&cfs_b->lock);
4305 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
4306 list_del_rcu(&cfs_rq->throttled_list);
4307 raw_spin_unlock(&cfs_b->lock);
4309 /* update hierarchical throttle state */
4310 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
4312 if (!cfs_rq->load.weight)
4315 task_delta = cfs_rq->h_nr_running;
4316 for_each_sched_entity(se) {
4320 cfs_rq = cfs_rq_of(se);
4322 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4323 cfs_rq->h_nr_running += task_delta;
4325 if (cfs_rq_throttled(cfs_rq))
4330 add_nr_running(rq, task_delta);
4332 /* determine whether we need to wake up potentially idle cpu */
4333 if (rq->curr == rq->idle && rq->cfs.nr_running)
4337 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
4338 u64 remaining, u64 expires)
4340 struct cfs_rq *cfs_rq;
4342 u64 starting_runtime = remaining;
4345 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
4347 struct rq *rq = rq_of(cfs_rq);
4351 if (!cfs_rq_throttled(cfs_rq))
4354 runtime = -cfs_rq->runtime_remaining + 1;
4355 if (runtime > remaining)
4356 runtime = remaining;
4357 remaining -= runtime;
4359 cfs_rq->runtime_remaining += runtime;
4360 cfs_rq->runtime_expires = expires;
4362 /* we check whether we're throttled above */
4363 if (cfs_rq->runtime_remaining > 0)
4364 unthrottle_cfs_rq(cfs_rq);
4374 return starting_runtime - remaining;
4378 * Responsible for refilling a task_group's bandwidth and unthrottling its
4379 * cfs_rqs as appropriate. If there has been no activity within the last
4380 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
4381 * used to track this state.
4383 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
4385 u64 runtime, runtime_expires;
4388 /* no need to continue the timer with no bandwidth constraint */
4389 if (cfs_b->quota == RUNTIME_INF)
4390 goto out_deactivate;
4392 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4393 cfs_b->nr_periods += overrun;
4396 * idle depends on !throttled (for the case of a large deficit), and if
4397 * we're going inactive then everything else can be deferred
4399 if (cfs_b->idle && !throttled)
4400 goto out_deactivate;
4402 __refill_cfs_bandwidth_runtime(cfs_b);
4405 /* mark as potentially idle for the upcoming period */
4410 /* account preceding periods in which throttling occurred */
4411 cfs_b->nr_throttled += overrun;
4413 runtime_expires = cfs_b->runtime_expires;
4416 * This check is repeated as we are holding onto the new bandwidth while
4417 * we unthrottle. This can potentially race with an unthrottled group
4418 * trying to acquire new bandwidth from the global pool. This can result
4419 * in us over-using our runtime if it is all used during this loop, but
4420 * only by limited amounts in that extreme case.
4422 while (throttled && cfs_b->runtime > 0) {
4423 runtime = cfs_b->runtime;
4424 raw_spin_unlock(&cfs_b->lock);
4425 /* we can't nest cfs_b->lock while distributing bandwidth */
4426 runtime = distribute_cfs_runtime(cfs_b, runtime,
4428 raw_spin_lock(&cfs_b->lock);
4430 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4432 cfs_b->runtime -= min(runtime, cfs_b->runtime);
4436 * While we are ensured activity in the period following an
4437 * unthrottle, this also covers the case in which the new bandwidth is
4438 * insufficient to cover the existing bandwidth deficit. (Forcing the
4439 * timer to remain active while there are any throttled entities.)
4449 /* a cfs_rq won't donate quota below this amount */
4450 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
4451 /* minimum remaining period time to redistribute slack quota */
4452 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
4453 /* how long we wait to gather additional slack before distributing */
4454 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
4457 * Are we near the end of the current quota period?
4459 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
4460 * hrtimer base being cleared by hrtimer_start. In the case of
4461 * migrate_hrtimers, base is never cleared, so we are fine.
4463 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
4465 struct hrtimer *refresh_timer = &cfs_b->period_timer;
4468 /* if the call-back is running a quota refresh is already occurring */
4469 if (hrtimer_callback_running(refresh_timer))
4472 /* is a quota refresh about to occur? */
4473 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
4474 if (remaining < min_expire)
4480 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
4482 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
4484 /* if there's a quota refresh soon don't bother with slack */
4485 if (runtime_refresh_within(cfs_b, min_left))
4488 hrtimer_start(&cfs_b->slack_timer,
4489 ns_to_ktime(cfs_bandwidth_slack_period),
4493 /* we know any runtime found here is valid as update_curr() precedes return */
4494 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4496 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4497 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4499 if (slack_runtime <= 0)
4502 raw_spin_lock(&cfs_b->lock);
4503 if (cfs_b->quota != RUNTIME_INF &&
4504 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4505 cfs_b->runtime += slack_runtime;
4507 /* we are under rq->lock, defer unthrottling using a timer */
4508 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4509 !list_empty(&cfs_b->throttled_cfs_rq))
4510 start_cfs_slack_bandwidth(cfs_b);
4512 raw_spin_unlock(&cfs_b->lock);
4514 /* even if it's not valid for return we don't want to try again */
4515 cfs_rq->runtime_remaining -= slack_runtime;
4518 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4520 if (!cfs_bandwidth_used())
4523 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
4526 __return_cfs_rq_runtime(cfs_rq);
4530 * This is done with a timer (instead of inline with bandwidth return) since
4531 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
4533 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4535 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4538 /* confirm we're still not at a refresh boundary */
4539 raw_spin_lock(&cfs_b->lock);
4540 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4541 raw_spin_unlock(&cfs_b->lock);
4545 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
4546 runtime = cfs_b->runtime;
4548 expires = cfs_b->runtime_expires;
4549 raw_spin_unlock(&cfs_b->lock);
4554 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4556 raw_spin_lock(&cfs_b->lock);
4557 if (expires == cfs_b->runtime_expires)
4558 cfs_b->runtime -= min(runtime, cfs_b->runtime);
4559 raw_spin_unlock(&cfs_b->lock);
4563 * When a group wakes up we want to make sure that its quota is not already
4564 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
4565 * runtime as update_curr() throttling can not not trigger until it's on-rq.
4567 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4569 if (!cfs_bandwidth_used())
4572 /* an active group must be handled by the update_curr()->put() path */
4573 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4576 /* ensure the group is not already throttled */
4577 if (cfs_rq_throttled(cfs_rq))
4580 /* update runtime allocation */
4581 account_cfs_rq_runtime(cfs_rq, 0);
4582 if (cfs_rq->runtime_remaining <= 0)
4583 throttle_cfs_rq(cfs_rq);
4586 static void sync_throttle(struct task_group *tg, int cpu)
4588 struct cfs_rq *pcfs_rq, *cfs_rq;
4590 if (!cfs_bandwidth_used())
4596 cfs_rq = tg->cfs_rq[cpu];
4597 pcfs_rq = tg->parent->cfs_rq[cpu];
4599 cfs_rq->throttle_count = pcfs_rq->throttle_count;
4600 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
4603 /* conditionally throttle active cfs_rq's from put_prev_entity() */
4604 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4606 if (!cfs_bandwidth_used())
4609 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
4613 * it's possible for a throttled entity to be forced into a running
4614 * state (e.g. set_curr_task), in this case we're finished.
4616 if (cfs_rq_throttled(cfs_rq))
4619 throttle_cfs_rq(cfs_rq);
4623 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4625 struct cfs_bandwidth *cfs_b =
4626 container_of(timer, struct cfs_bandwidth, slack_timer);
4628 do_sched_cfs_slack_timer(cfs_b);
4630 return HRTIMER_NORESTART;
4633 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4635 struct cfs_bandwidth *cfs_b =
4636 container_of(timer, struct cfs_bandwidth, period_timer);
4640 raw_spin_lock(&cfs_b->lock);
4642 overrun = hrtimer_forward_now(timer, cfs_b->period);
4646 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4649 cfs_b->period_active = 0;
4650 raw_spin_unlock(&cfs_b->lock);
4652 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4655 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4657 raw_spin_lock_init(&cfs_b->lock);
4659 cfs_b->quota = RUNTIME_INF;
4660 cfs_b->period = ns_to_ktime(default_cfs_period());
4662 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4663 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
4664 cfs_b->period_timer.function = sched_cfs_period_timer;
4665 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4666 cfs_b->slack_timer.function = sched_cfs_slack_timer;
4669 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4671 cfs_rq->runtime_enabled = 0;
4672 INIT_LIST_HEAD(&cfs_rq->throttled_list);
4675 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4677 lockdep_assert_held(&cfs_b->lock);
4679 if (!cfs_b->period_active) {
4680 cfs_b->period_active = 1;
4681 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4682 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
4686 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4688 /* init_cfs_bandwidth() was not called */
4689 if (!cfs_b->throttled_cfs_rq.next)
4692 hrtimer_cancel(&cfs_b->period_timer);
4693 hrtimer_cancel(&cfs_b->slack_timer);
4696 static void __maybe_unused update_runtime_enabled(struct rq *rq)
4698 struct cfs_rq *cfs_rq;
4700 for_each_leaf_cfs_rq(rq, cfs_rq) {
4701 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4703 raw_spin_lock(&cfs_b->lock);
4704 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4705 raw_spin_unlock(&cfs_b->lock);
4709 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4711 struct cfs_rq *cfs_rq;
4713 for_each_leaf_cfs_rq(rq, cfs_rq) {
4714 if (!cfs_rq->runtime_enabled)
4718 * clock_task is not advancing so we just need to make sure
4719 * there's some valid quota amount
4721 cfs_rq->runtime_remaining = 1;
4723 * Offline rq is schedulable till cpu is completely disabled
4724 * in take_cpu_down(), so we prevent new cfs throttling here.
4726 cfs_rq->runtime_enabled = 0;
4728 if (cfs_rq_throttled(cfs_rq))
4729 unthrottle_cfs_rq(cfs_rq);
4733 #else /* CONFIG_CFS_BANDWIDTH */
4734 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4736 return rq_clock_task(rq_of(cfs_rq));
4739 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4740 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4741 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4742 static inline void sync_throttle(struct task_group *tg, int cpu) {}
4743 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4745 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4750 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4755 static inline int throttled_lb_pair(struct task_group *tg,
4756 int src_cpu, int dest_cpu)
4761 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4763 #ifdef CONFIG_FAIR_GROUP_SCHED
4764 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4767 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4771 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4772 static inline void update_runtime_enabled(struct rq *rq) {}
4773 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4775 #endif /* CONFIG_CFS_BANDWIDTH */
4777 /**************************************************
4778 * CFS operations on tasks:
4781 #ifdef CONFIG_SCHED_HRTICK
4782 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4784 struct sched_entity *se = &p->se;
4785 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4787 SCHED_WARN_ON(task_rq(p) != rq);
4789 if (rq->cfs.h_nr_running > 1) {
4790 u64 slice = sched_slice(cfs_rq, se);
4791 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4792 s64 delta = slice - ran;
4799 hrtick_start(rq, delta);
4804 * called from enqueue/dequeue and updates the hrtick when the
4805 * current task is from our class and nr_running is low enough
4808 static void hrtick_update(struct rq *rq)
4810 struct task_struct *curr = rq->curr;
4812 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4815 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4816 hrtick_start_fair(rq, curr);
4818 #else /* !CONFIG_SCHED_HRTICK */
4820 hrtick_start_fair(struct rq *rq, struct task_struct *p)
4824 static inline void hrtick_update(struct rq *rq)
4830 * The enqueue_task method is called before nr_running is
4831 * increased. Here we update the fair scheduling stats and
4832 * then put the task into the rbtree:
4835 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4837 struct cfs_rq *cfs_rq;
4838 struct sched_entity *se = &p->se;
4841 * If in_iowait is set, the code below may not trigger any cpufreq
4842 * utilization updates, so do it here explicitly with the IOWAIT flag
4846 cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
4848 for_each_sched_entity(se) {
4851 cfs_rq = cfs_rq_of(se);
4852 enqueue_entity(cfs_rq, se, flags);
4855 * end evaluation on encountering a throttled cfs_rq
4857 * note: in the case of encountering a throttled cfs_rq we will
4858 * post the final h_nr_running increment below.
4860 if (cfs_rq_throttled(cfs_rq))
4862 cfs_rq->h_nr_running++;
4864 flags = ENQUEUE_WAKEUP;
4867 for_each_sched_entity(se) {
4868 cfs_rq = cfs_rq_of(se);
4869 cfs_rq->h_nr_running++;
4871 if (cfs_rq_throttled(cfs_rq))
4874 update_load_avg(se, UPDATE_TG);
4875 update_cfs_shares(se);
4879 add_nr_running(rq, 1);
4884 static void set_next_buddy(struct sched_entity *se);
4887 * The dequeue_task method is called before nr_running is
4888 * decreased. We remove the task from the rbtree and
4889 * update the fair scheduling stats:
4891 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4893 struct cfs_rq *cfs_rq;
4894 struct sched_entity *se = &p->se;
4895 int task_sleep = flags & DEQUEUE_SLEEP;
4897 for_each_sched_entity(se) {
4898 cfs_rq = cfs_rq_of(se);
4899 dequeue_entity(cfs_rq, se, flags);
4902 * end evaluation on encountering a throttled cfs_rq
4904 * note: in the case of encountering a throttled cfs_rq we will
4905 * post the final h_nr_running decrement below.
4907 if (cfs_rq_throttled(cfs_rq))
4909 cfs_rq->h_nr_running--;
4911 /* Don't dequeue parent if it has other entities besides us */
4912 if (cfs_rq->load.weight) {
4913 /* Avoid re-evaluating load for this entity: */
4914 se = parent_entity(se);
4916 * Bias pick_next to pick a task from this cfs_rq, as
4917 * p is sleeping when it is within its sched_slice.
4919 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
4923 flags |= DEQUEUE_SLEEP;
4926 for_each_sched_entity(se) {
4927 cfs_rq = cfs_rq_of(se);
4928 cfs_rq->h_nr_running--;
4930 if (cfs_rq_throttled(cfs_rq))
4933 update_load_avg(se, UPDATE_TG);
4934 update_cfs_shares(se);
4938 sub_nr_running(rq, 1);
4945 /* Working cpumask for: load_balance, load_balance_newidle. */
4946 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
4947 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
4949 #ifdef CONFIG_NO_HZ_COMMON
4951 * per rq 'load' arrray crap; XXX kill this.
4955 * The exact cpuload calculated at every tick would be:
4957 * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
4959 * If a cpu misses updates for n ticks (as it was idle) and update gets
4960 * called on the n+1-th tick when cpu may be busy, then we have:
4962 * load_n = (1 - 1/2^i)^n * load_0
4963 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load
4965 * decay_load_missed() below does efficient calculation of
4967 * load' = (1 - 1/2^i)^n * load
4969 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
4970 * This allows us to precompute the above in said factors, thereby allowing the
4971 * reduction of an arbitrary n in O(log_2 n) steps. (See also
4972 * fixed_power_int())
4974 * The calculation is approximated on a 128 point scale.
4976 #define DEGRADE_SHIFT 7
4978 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4979 static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4980 { 0, 0, 0, 0, 0, 0, 0, 0 },
4981 { 64, 32, 8, 0, 0, 0, 0, 0 },
4982 { 96, 72, 40, 12, 1, 0, 0, 0 },
4983 { 112, 98, 75, 43, 15, 1, 0, 0 },
4984 { 120, 112, 98, 76, 45, 16, 2, 0 }
4988 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
4989 * would be when CPU is idle and so we just decay the old load without
4990 * adding any new load.
4992 static unsigned long
4993 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4997 if (!missed_updates)
5000 if (missed_updates >= degrade_zero_ticks[idx])
5004 return load >> missed_updates;
5006 while (missed_updates) {
5007 if (missed_updates % 2)
5008 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
5010 missed_updates >>= 1;
5015 #endif /* CONFIG_NO_HZ_COMMON */
5018 * __cpu_load_update - update the rq->cpu_load[] statistics
5019 * @this_rq: The rq to update statistics for
5020 * @this_load: The current load
5021 * @pending_updates: The number of missed updates
5023 * Update rq->cpu_load[] statistics. This function is usually called every
5024 * scheduler tick (TICK_NSEC).
5026 * This function computes a decaying average:
5028 * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
5030 * Because of NOHZ it might not get called on every tick which gives need for
5031 * the @pending_updates argument.
5033 * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
5034 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
5035 * = A * (A * load[i]_n-2 + B) + B
5036 * = A * (A * (A * load[i]_n-3 + B) + B) + B
5037 * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
5038 * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
5039 * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
5040 * = (1 - 1/2^i)^n * (load[i]_0 - load) + load
5042 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
5043 * any change in load would have resulted in the tick being turned back on.
5045 * For regular NOHZ, this reduces to:
5047 * load[i]_n = (1 - 1/2^i)^n * load[i]_0
5049 * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra
5052 static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
5053 unsigned long pending_updates)
5055 unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
5058 this_rq->nr_load_updates++;
5060 /* Update our load: */
5061 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
5062 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
5063 unsigned long old_load, new_load;
5065 /* scale is effectively 1 << i now, and >> i divides by scale */
5067 old_load = this_rq->cpu_load[i];
5068 #ifdef CONFIG_NO_HZ_COMMON
5069 old_load = decay_load_missed(old_load, pending_updates - 1, i);
5070 if (tickless_load) {
5071 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
5073 * old_load can never be a negative value because a
5074 * decayed tickless_load cannot be greater than the
5075 * original tickless_load.
5077 old_load += tickless_load;
5080 new_load = this_load;
5082 * Round up the averaging division if load is increasing. This
5083 * prevents us from getting stuck on 9 if the load is 10, for
5086 if (new_load > old_load)
5087 new_load += scale - 1;
5089 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
5092 sched_avg_update(this_rq);
5095 /* Used instead of source_load when we know the type == 0 */
5096 static unsigned long weighted_cpuload(const int cpu)
5098 return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
5101 #ifdef CONFIG_NO_HZ_COMMON
5103 * There is no sane way to deal with nohz on smp when using jiffies because the
5104 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
5105 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
5107 * Therefore we need to avoid the delta approach from the regular tick when
5108 * possible since that would seriously skew the load calculation. This is why we
5109 * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
5110 * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
5111 * loop exit, nohz_idle_balance, nohz full exit...)
5113 * This means we might still be one tick off for nohz periods.
5116 static void cpu_load_update_nohz(struct rq *this_rq,
5117 unsigned long curr_jiffies,
5120 unsigned long pending_updates;
5122 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
5123 if (pending_updates) {
5124 this_rq->last_load_update_tick = curr_jiffies;
5126 * In the regular NOHZ case, we were idle, this means load 0.
5127 * In the NOHZ_FULL case, we were non-idle, we should consider
5128 * its weighted load.
5130 cpu_load_update(this_rq, load, pending_updates);
5135 * Called from nohz_idle_balance() to update the load ratings before doing the
5138 static void cpu_load_update_idle(struct rq *this_rq)
5141 * bail if there's load or we're actually up-to-date.
5143 if (weighted_cpuload(cpu_of(this_rq)))
5146 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
5150 * Record CPU load on nohz entry so we know the tickless load to account
5151 * on nohz exit. cpu_load[0] happens then to be updated more frequently
5152 * than other cpu_load[idx] but it should be fine as cpu_load readers
5153 * shouldn't rely into synchronized cpu_load[*] updates.
5155 void cpu_load_update_nohz_start(void)
5157 struct rq *this_rq = this_rq();
5160 * This is all lockless but should be fine. If weighted_cpuload changes
5161 * concurrently we'll exit nohz. And cpu_load write can race with
5162 * cpu_load_update_idle() but both updater would be writing the same.
5164 this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
5168 * Account the tickless load in the end of a nohz frame.
5170 void cpu_load_update_nohz_stop(void)
5172 unsigned long curr_jiffies = READ_ONCE(jiffies);
5173 struct rq *this_rq = this_rq();
5177 if (curr_jiffies == this_rq->last_load_update_tick)
5180 load = weighted_cpuload(cpu_of(this_rq));
5181 rq_lock(this_rq, &rf);
5182 update_rq_clock(this_rq);
5183 cpu_load_update_nohz(this_rq, curr_jiffies, load);
5184 rq_unlock(this_rq, &rf);
5186 #else /* !CONFIG_NO_HZ_COMMON */
5187 static inline void cpu_load_update_nohz(struct rq *this_rq,
5188 unsigned long curr_jiffies,
5189 unsigned long load) { }
5190 #endif /* CONFIG_NO_HZ_COMMON */
5192 static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
5194 #ifdef CONFIG_NO_HZ_COMMON
5195 /* See the mess around cpu_load_update_nohz(). */
5196 this_rq->last_load_update_tick = READ_ONCE(jiffies);
5198 cpu_load_update(this_rq, load, 1);
5202 * Called from scheduler_tick()
5204 void cpu_load_update_active(struct rq *this_rq)
5206 unsigned long load = weighted_cpuload(cpu_of(this_rq));
5208 if (tick_nohz_tick_stopped())
5209 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
5211 cpu_load_update_periodic(this_rq, load);
5215 * Return a low guess at the load of a migration-source cpu weighted
5216 * according to the scheduling class and "nice" value.
5218 * We want to under-estimate the load of migration sources, to
5219 * balance conservatively.
5221 static unsigned long source_load(int cpu, int type)
5223 struct rq *rq = cpu_rq(cpu);
5224 unsigned long total = weighted_cpuload(cpu);
5226 if (type == 0 || !sched_feat(LB_BIAS))
5229 return min(rq->cpu_load[type-1], total);
5233 * Return a high guess at the load of a migration-target cpu weighted
5234 * according to the scheduling class and "nice" value.
5236 static unsigned long target_load(int cpu, int type)
5238 struct rq *rq = cpu_rq(cpu);
5239 unsigned long total = weighted_cpuload(cpu);
5241 if (type == 0 || !sched_feat(LB_BIAS))
5244 return max(rq->cpu_load[type-1], total);
5247 static unsigned long capacity_of(int cpu)
5249 return cpu_rq(cpu)->cpu_capacity;
5252 static unsigned long capacity_orig_of(int cpu)
5254 return cpu_rq(cpu)->cpu_capacity_orig;
5257 static unsigned long cpu_avg_load_per_task(int cpu)
5259 struct rq *rq = cpu_rq(cpu);
5260 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
5261 unsigned long load_avg = weighted_cpuload(cpu);
5264 return load_avg / nr_running;
5269 #ifdef CONFIG_FAIR_GROUP_SCHED
5271 * effective_load() calculates the load change as seen from the root_task_group
5273 * Adding load to a group doesn't make a group heavier, but can cause movement
5274 * of group shares between cpus. Assuming the shares were perfectly aligned one
5275 * can calculate the shift in shares.
5277 * Calculate the effective load difference if @wl is added (subtracted) to @tg
5278 * on this @cpu and results in a total addition (subtraction) of @wg to the
5279 * total group weight.
5281 * Given a runqueue weight distribution (rw_i) we can compute a shares
5282 * distribution (s_i) using:
5284 * s_i = rw_i / \Sum rw_j (1)
5286 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
5287 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
5288 * shares distribution (s_i):
5290 * rw_i = { 2, 4, 1, 0 }
5291 * s_i = { 2/7, 4/7, 1/7, 0 }
5293 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
5294 * task used to run on and the CPU the waker is running on), we need to
5295 * compute the effect of waking a task on either CPU and, in case of a sync
5296 * wakeup, compute the effect of the current task going to sleep.
5298 * So for a change of @wl to the local @cpu with an overall group weight change
5299 * of @wl we can compute the new shares distribution (s'_i) using:
5301 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
5303 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
5304 * differences in waking a task to CPU 0. The additional task changes the
5305 * weight and shares distributions like:
5307 * rw'_i = { 3, 4, 1, 0 }
5308 * s'_i = { 3/8, 4/8, 1/8, 0 }
5310 * We can then compute the difference in effective weight by using:
5312 * dw_i = S * (s'_i - s_i) (3)
5314 * Where 'S' is the group weight as seen by its parent.
5316 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
5317 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
5318 * 4/7) times the weight of the group.
5320 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5322 struct sched_entity *se = tg->se[cpu];
5324 if (!tg->parent) /* the trivial, non-cgroup case */
5327 for_each_sched_entity(se) {
5328 struct cfs_rq *cfs_rq = se->my_q;
5329 long W, w = cfs_rq_load_avg(cfs_rq);
5334 * W = @wg + \Sum rw_j
5336 W = wg + atomic_long_read(&tg->load_avg);
5338 /* Ensure \Sum rw_j >= rw_i */
5339 W -= cfs_rq->tg_load_avg_contrib;
5348 * wl = S * s'_i; see (2)
5351 wl = (w * (long)scale_load_down(tg->shares)) / W;
5353 wl = scale_load_down(tg->shares);
5356 * Per the above, wl is the new se->load.weight value; since
5357 * those are clipped to [MIN_SHARES, ...) do so now. See
5358 * calc_cfs_shares().
5360 if (wl < MIN_SHARES)
5364 * wl = dw_i = S * (s'_i - s_i); see (3)
5366 wl -= se->avg.load_avg;
5369 * Recursively apply this logic to all parent groups to compute
5370 * the final effective load change on the root group. Since
5371 * only the @tg group gets extra weight, all parent groups can
5372 * only redistribute existing shares. @wl is the shift in shares
5373 * resulting from this level per the above.
5382 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5389 static void record_wakee(struct task_struct *p)
5392 * Only decay a single time; tasks that have less then 1 wakeup per
5393 * jiffy will not have built up many flips.
5395 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5396 current->wakee_flips >>= 1;
5397 current->wakee_flip_decay_ts = jiffies;
5400 if (current->last_wakee != p) {
5401 current->last_wakee = p;
5402 current->wakee_flips++;
5407 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
5409 * A waker of many should wake a different task than the one last awakened
5410 * at a frequency roughly N times higher than one of its wakees.
5412 * In order to determine whether we should let the load spread vs consolidating
5413 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
5414 * partner, and a factor of lls_size higher frequency in the other.
5416 * With both conditions met, we can be relatively sure that the relationship is
5417 * non-monogamous, with partner count exceeding socket size.
5419 * Waker/wakee being client/server, worker/dispatcher, interrupt source or
5420 * whatever is irrelevant, spread criteria is apparent partner count exceeds
5423 static int wake_wide(struct task_struct *p)
5425 unsigned int master = current->wakee_flips;
5426 unsigned int slave = p->wakee_flips;
5427 int factor = this_cpu_read(sd_llc_size);
5430 swap(master, slave);
5431 if (slave < factor || master < slave * factor)
5436 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5437 int prev_cpu, int sync)
5439 s64 this_load, load;
5440 s64 this_eff_load, prev_eff_load;
5442 struct task_group *tg;
5443 unsigned long weight;
5447 this_cpu = smp_processor_id();
5448 load = source_load(prev_cpu, idx);
5449 this_load = target_load(this_cpu, idx);
5452 * If sync wakeup then subtract the (maximum possible)
5453 * effect of the currently running task from the load
5454 * of the current CPU:
5457 tg = task_group(current);
5458 weight = current->se.avg.load_avg;
5460 this_load += effective_load(tg, this_cpu, -weight, -weight);
5461 load += effective_load(tg, prev_cpu, 0, -weight);
5465 weight = p->se.avg.load_avg;
5468 * In low-load situations, where prev_cpu is idle and this_cpu is idle
5469 * due to the sync cause above having dropped this_load to 0, we'll
5470 * always have an imbalance, but there's really nothing you can do
5471 * about that, so that's good too.
5473 * Otherwise check if either cpus are near enough in load to allow this
5474 * task to be woken on this_cpu.
5476 this_eff_load = 100;
5477 this_eff_load *= capacity_of(prev_cpu);
5479 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
5480 prev_eff_load *= capacity_of(this_cpu);
5482 if (this_load > 0) {
5483 this_eff_load *= this_load +
5484 effective_load(tg, this_cpu, weight, weight);
5486 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
5489 balanced = this_eff_load <= prev_eff_load;
5491 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
5496 schedstat_inc(sd->ttwu_move_affine);
5497 schedstat_inc(p->se.statistics.nr_wakeups_affine);
5502 static inline int task_util(struct task_struct *p);
5503 static int cpu_util_wake(int cpu, struct task_struct *p);
5505 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
5507 return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
5511 * find_idlest_group finds and returns the least busy CPU group within the
5514 static struct sched_group *
5515 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5516 int this_cpu, int sd_flag)
5518 struct sched_group *idlest = NULL, *group = sd->groups;
5519 struct sched_group *most_spare_sg = NULL;
5520 unsigned long min_runnable_load = ULONG_MAX, this_runnable_load = 0;
5521 unsigned long min_avg_load = ULONG_MAX, this_avg_load = 0;
5522 unsigned long most_spare = 0, this_spare = 0;
5523 int load_idx = sd->forkexec_idx;
5524 int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
5525 unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
5526 (sd->imbalance_pct-100) / 100;
5528 if (sd_flag & SD_BALANCE_WAKE)
5529 load_idx = sd->wake_idx;
5532 unsigned long load, avg_load, runnable_load;
5533 unsigned long spare_cap, max_spare_cap;
5537 /* Skip over this group if it has no CPUs allowed */
5538 if (!cpumask_intersects(sched_group_cpus(group),
5542 local_group = cpumask_test_cpu(this_cpu,
5543 sched_group_cpus(group));
5546 * Tally up the load of all CPUs in the group and find
5547 * the group containing the CPU with most spare capacity.
5553 for_each_cpu(i, sched_group_cpus(group)) {
5554 /* Bias balancing toward cpus of our domain */
5556 load = source_load(i, load_idx);
5558 load = target_load(i, load_idx);
5560 runnable_load += load;
5562 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
5564 spare_cap = capacity_spare_wake(i, p);
5566 if (spare_cap > max_spare_cap)
5567 max_spare_cap = spare_cap;
5570 /* Adjust by relative CPU capacity of the group */
5571 avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
5572 group->sgc->capacity;
5573 runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
5574 group->sgc->capacity;
5577 this_runnable_load = runnable_load;
5578 this_avg_load = avg_load;
5579 this_spare = max_spare_cap;
5581 if (min_runnable_load > (runnable_load + imbalance)) {
5583 * The runnable load is significantly smaller
5584 * so we can pick this new cpu
5586 min_runnable_load = runnable_load;
5587 min_avg_load = avg_load;
5589 } else if ((runnable_load < (min_runnable_load + imbalance)) &&
5590 (100*min_avg_load > imbalance_scale*avg_load)) {
5592 * The runnable loads are close so take the
5593 * blocked load into account through avg_load.
5595 min_avg_load = avg_load;
5599 if (most_spare < max_spare_cap) {
5600 most_spare = max_spare_cap;
5601 most_spare_sg = group;
5604 } while (group = group->next, group != sd->groups);
5607 * The cross-over point between using spare capacity or least load
5608 * is too conservative for high utilization tasks on partially
5609 * utilized systems if we require spare_capacity > task_util(p),
5610 * so we allow for some task stuffing by using
5611 * spare_capacity > task_util(p)/2.
5613 * Spare capacity can't be used for fork because the utilization has
5614 * not been set yet, we must first select a rq to compute the initial
5617 if (sd_flag & SD_BALANCE_FORK)
5620 if (this_spare > task_util(p) / 2 &&
5621 imbalance_scale*this_spare > 100*most_spare)
5624 if (most_spare > task_util(p) / 2)
5625 return most_spare_sg;
5631 if (min_runnable_load > (this_runnable_load + imbalance))
5634 if ((this_runnable_load < (min_runnable_load + imbalance)) &&
5635 (100*this_avg_load < imbalance_scale*min_avg_load))
5642 * find_idlest_cpu - find the idlest cpu among the cpus in group.
5645 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5647 unsigned long load, min_load = ULONG_MAX;
5648 unsigned int min_exit_latency = UINT_MAX;
5649 u64 latest_idle_timestamp = 0;
5650 int least_loaded_cpu = this_cpu;
5651 int shallowest_idle_cpu = -1;
5654 /* Check if we have any choice: */
5655 if (group->group_weight == 1)
5656 return cpumask_first(sched_group_cpus(group));
5658 /* Traverse only the allowed CPUs */
5659 for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
5661 struct rq *rq = cpu_rq(i);
5662 struct cpuidle_state *idle = idle_get_state(rq);
5663 if (idle && idle->exit_latency < min_exit_latency) {
5665 * We give priority to a CPU whose idle state
5666 * has the smallest exit latency irrespective
5667 * of any idle timestamp.
5669 min_exit_latency = idle->exit_latency;
5670 latest_idle_timestamp = rq->idle_stamp;
5671 shallowest_idle_cpu = i;
5672 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
5673 rq->idle_stamp > latest_idle_timestamp) {
5675 * If equal or no active idle state, then
5676 * the most recently idled CPU might have
5679 latest_idle_timestamp = rq->idle_stamp;
5680 shallowest_idle_cpu = i;
5682 } else if (shallowest_idle_cpu == -1) {
5683 load = weighted_cpuload(i);
5684 if (load < min_load || (load == min_load && i == this_cpu)) {
5686 least_loaded_cpu = i;
5691 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
5695 * Implement a for_each_cpu() variant that starts the scan at a given cpu
5696 * (@start), and wraps around.
5698 * This is used to scan for idle CPUs; such that not all CPUs looking for an
5699 * idle CPU find the same CPU. The down-side is that tasks tend to cycle
5700 * through the LLC domain.
5702 * Especially tbench is found sensitive to this.
5705 static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
5710 next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
5714 return nr_cpumask_bits;
5716 if (next >= nr_cpumask_bits) {
5726 #define for_each_cpu_wrap(cpu, mask, start, wrap) \
5727 for ((wrap) = 0, (cpu) = (start)-1; \
5728 (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)), \
5729 (cpu) < nr_cpumask_bits; )
5731 #ifdef CONFIG_SCHED_SMT
5733 static inline void set_idle_cores(int cpu, int val)
5735 struct sched_domain_shared *sds;
5737 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5739 WRITE_ONCE(sds->has_idle_cores, val);
5742 static inline bool test_idle_cores(int cpu, bool def)
5744 struct sched_domain_shared *sds;
5746 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5748 return READ_ONCE(sds->has_idle_cores);
5754 * Scans the local SMT mask to see if the entire core is idle, and records this
5755 * information in sd_llc_shared->has_idle_cores.
5757 * Since SMT siblings share all cache levels, inspecting this limited remote
5758 * state should be fairly cheap.
5760 void __update_idle_core(struct rq *rq)
5762 int core = cpu_of(rq);
5766 if (test_idle_cores(core, true))
5769 for_each_cpu(cpu, cpu_smt_mask(core)) {
5777 set_idle_cores(core, 1);
5783 * Scan the entire LLC domain for idle cores; this dynamically switches off if
5784 * there are no idle cores left in the system; tracked through
5785 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
5787 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5789 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
5790 int core, cpu, wrap;
5792 if (!static_branch_likely(&sched_smt_present))
5795 if (!test_idle_cores(target, false))
5798 cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
5800 for_each_cpu_wrap(core, cpus, target, wrap) {
5803 for_each_cpu(cpu, cpu_smt_mask(core)) {
5804 cpumask_clear_cpu(cpu, cpus);
5814 * Failed to find an idle core; stop looking for one.
5816 set_idle_cores(target, 0);
5822 * Scan the local SMT mask for idle CPUs.
5824 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5828 if (!static_branch_likely(&sched_smt_present))
5831 for_each_cpu(cpu, cpu_smt_mask(target)) {
5832 if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
5841 #else /* CONFIG_SCHED_SMT */
5843 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5848 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5853 #endif /* CONFIG_SCHED_SMT */
5856 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
5857 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
5858 * average idle time for this rq (as found in rq->avg_idle).
5860 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
5862 struct sched_domain *this_sd;
5863 u64 avg_cost, avg_idle = this_rq()->avg_idle;
5868 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
5872 avg_cost = this_sd->avg_scan_cost;
5875 * Due to large variance we need a large fuzz factor; hackbench in
5876 * particularly is sensitive here.
5878 if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
5881 time = local_clock();
5883 for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
5884 if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
5890 time = local_clock() - time;
5891 cost = this_sd->avg_scan_cost;
5892 delta = (s64)(time - cost) / 8;
5893 this_sd->avg_scan_cost += delta;
5899 * Try and locate an idle core/thread in the LLC cache domain.
5901 static int select_idle_sibling(struct task_struct *p, int prev, int target)
5903 struct sched_domain *sd;
5906 if (idle_cpu(target))
5910 * If the previous cpu is cache affine and idle, don't be stupid.
5912 if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
5915 sd = rcu_dereference(per_cpu(sd_llc, target));
5919 i = select_idle_core(p, sd, target);
5920 if ((unsigned)i < nr_cpumask_bits)
5923 i = select_idle_cpu(p, sd, target);
5924 if ((unsigned)i < nr_cpumask_bits)
5927 i = select_idle_smt(p, sd, target);
5928 if ((unsigned)i < nr_cpumask_bits)
5935 * cpu_util returns the amount of capacity of a CPU that is used by CFS
5936 * tasks. The unit of the return value must be the one of capacity so we can
5937 * compare the utilization with the capacity of the CPU that is available for
5938 * CFS task (ie cpu_capacity).
5940 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
5941 * recent utilization of currently non-runnable tasks on a CPU. It represents
5942 * the amount of utilization of a CPU in the range [0..capacity_orig] where
5943 * capacity_orig is the cpu_capacity available at the highest frequency
5944 * (arch_scale_freq_capacity()).
5945 * The utilization of a CPU converges towards a sum equal to or less than the
5946 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
5947 * the running time on this CPU scaled by capacity_curr.
5949 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
5950 * higher than capacity_orig because of unfortunate rounding in
5951 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
5952 * the average stabilizes with the new running time. We need to check that the
5953 * utilization stays within the range of [0..capacity_orig] and cap it if
5954 * necessary. Without utilization capping, a group could be seen as overloaded
5955 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
5956 * available capacity. We allow utilization to overshoot capacity_curr (but not
5957 * capacity_orig) as it useful for predicting the capacity required after task
5958 * migrations (scheduler-driven DVFS).
5960 static int cpu_util(int cpu)
5962 unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
5963 unsigned long capacity = capacity_orig_of(cpu);
5965 return (util >= capacity) ? capacity : util;
5968 static inline int task_util(struct task_struct *p)
5970 return p->se.avg.util_avg;
5974 * cpu_util_wake: Compute cpu utilization with any contributions from
5975 * the waking task p removed.
5977 static int cpu_util_wake(int cpu, struct task_struct *p)
5979 unsigned long util, capacity;
5981 /* Task has no contribution or is new */
5982 if (cpu != task_cpu(p) || !p->se.avg.last_update_time)
5983 return cpu_util(cpu);
5985 capacity = capacity_orig_of(cpu);
5986 util = max_t(long, cpu_rq(cpu)->cfs.avg.util_avg - task_util(p), 0);
5988 return (util >= capacity) ? capacity : util;
5992 * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
5993 * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
5995 * In that case WAKE_AFFINE doesn't make sense and we'll let
5996 * BALANCE_WAKE sort things out.
5998 static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
6000 long min_cap, max_cap;
6002 min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
6003 max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
6005 /* Minimum capacity is close to max, no need to abort wake_affine */
6006 if (max_cap - min_cap < max_cap >> 3)
6009 /* Bring task utilization in sync with prev_cpu */
6010 sync_entity_load_avg(&p->se);
6012 return min_cap * 1024 < task_util(p) * capacity_margin;
6016 * select_task_rq_fair: Select target runqueue for the waking task in domains
6017 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
6018 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
6020 * Balances load by selecting the idlest cpu in the idlest group, or under
6021 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
6023 * Returns the target cpu number.
6025 * preempt must be disabled.
6028 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
6030 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
6031 int cpu = smp_processor_id();
6032 int new_cpu = prev_cpu;
6033 int want_affine = 0;
6034 int sync = wake_flags & WF_SYNC;
6036 if (sd_flag & SD_BALANCE_WAKE) {
6038 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
6039 && cpumask_test_cpu(cpu, &p->cpus_allowed);
6043 for_each_domain(cpu, tmp) {
6044 if (!(tmp->flags & SD_LOAD_BALANCE))
6048 * If both cpu and prev_cpu are part of this domain,
6049 * cpu is a valid SD_WAKE_AFFINE target.
6051 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
6052 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
6057 if (tmp->flags & sd_flag)
6059 else if (!want_affine)
6064 sd = NULL; /* Prefer wake_affine over balance flags */
6065 if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
6070 if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
6071 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
6074 struct sched_group *group;
6077 if (!(sd->flags & sd_flag)) {
6082 group = find_idlest_group(sd, p, cpu, sd_flag);
6088 new_cpu = find_idlest_cpu(group, p, cpu);
6089 if (new_cpu == -1 || new_cpu == cpu) {
6090 /* Now try balancing at a lower domain level of cpu */
6095 /* Now try balancing at a lower domain level of new_cpu */
6097 weight = sd->span_weight;
6099 for_each_domain(cpu, tmp) {
6100 if (weight <= tmp->span_weight)
6102 if (tmp->flags & sd_flag)
6105 /* while loop will break here if sd == NULL */
6113 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
6114 * cfs_rq_of(p) references at time of call are still valid and identify the
6115 * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
6117 static void migrate_task_rq_fair(struct task_struct *p)
6120 * As blocked tasks retain absolute vruntime the migration needs to
6121 * deal with this by subtracting the old and adding the new
6122 * min_vruntime -- the latter is done by enqueue_entity() when placing
6123 * the task on the new runqueue.
6125 if (p->state == TASK_WAKING) {
6126 struct sched_entity *se = &p->se;
6127 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6130 #ifndef CONFIG_64BIT
6131 u64 min_vruntime_copy;
6134 min_vruntime_copy = cfs_rq->min_vruntime_copy;
6136 min_vruntime = cfs_rq->min_vruntime;
6137 } while (min_vruntime != min_vruntime_copy);
6139 min_vruntime = cfs_rq->min_vruntime;
6142 se->vruntime -= min_vruntime;
6146 * We are supposed to update the task to "current" time, then its up to date
6147 * and ready to go to new CPU/cfs_rq. But we have difficulty in getting
6148 * what current time is, so simply throw away the out-of-date time. This
6149 * will result in the wakee task is less decayed, but giving the wakee more
6150 * load sounds not bad.
6152 remove_entity_load_avg(&p->se);
6154 /* Tell new CPU we are migrated */
6155 p->se.avg.last_update_time = 0;
6157 /* We have migrated, no longer consider this task hot */
6158 p->se.exec_start = 0;
6161 static void task_dead_fair(struct task_struct *p)
6163 remove_entity_load_avg(&p->se);
6165 #endif /* CONFIG_SMP */
6167 static unsigned long
6168 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
6170 unsigned long gran = sysctl_sched_wakeup_granularity;
6173 * Since its curr running now, convert the gran from real-time
6174 * to virtual-time in his units.
6176 * By using 'se' instead of 'curr' we penalize light tasks, so
6177 * they get preempted easier. That is, if 'se' < 'curr' then
6178 * the resulting gran will be larger, therefore penalizing the
6179 * lighter, if otoh 'se' > 'curr' then the resulting gran will
6180 * be smaller, again penalizing the lighter task.
6182 * This is especially important for buddies when the leftmost
6183 * task is higher priority than the buddy.
6185 return calc_delta_fair(gran, se);
6189 * Should 'se' preempt 'curr'.
6203 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
6205 s64 gran, vdiff = curr->vruntime - se->vruntime;
6210 gran = wakeup_gran(curr, se);
6217 static void set_last_buddy(struct sched_entity *se)
6219 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
6222 for_each_sched_entity(se)
6223 cfs_rq_of(se)->last = se;
6226 static void set_next_buddy(struct sched_entity *se)
6228 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
6231 for_each_sched_entity(se)
6232 cfs_rq_of(se)->next = se;
6235 static void set_skip_buddy(struct sched_entity *se)
6237 for_each_sched_entity(se)
6238 cfs_rq_of(se)->skip = se;
6242 * Preempt the current task with a newly woken task if needed:
6244 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
6246 struct task_struct *curr = rq->curr;
6247 struct sched_entity *se = &curr->se, *pse = &p->se;
6248 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6249 int scale = cfs_rq->nr_running >= sched_nr_latency;
6250 int next_buddy_marked = 0;
6252 if (unlikely(se == pse))
6256 * This is possible from callers such as attach_tasks(), in which we
6257 * unconditionally check_prempt_curr() after an enqueue (which may have
6258 * lead to a throttle). This both saves work and prevents false
6259 * next-buddy nomination below.
6261 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
6264 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
6265 set_next_buddy(pse);
6266 next_buddy_marked = 1;
6270 * We can come here with TIF_NEED_RESCHED already set from new task
6273 * Note: this also catches the edge-case of curr being in a throttled
6274 * group (e.g. via set_curr_task), since update_curr() (in the
6275 * enqueue of curr) will have resulted in resched being set. This
6276 * prevents us from potentially nominating it as a false LAST_BUDDY
6279 if (test_tsk_need_resched(curr))
6282 /* Idle tasks are by definition preempted by non-idle tasks. */
6283 if (unlikely(curr->policy == SCHED_IDLE) &&
6284 likely(p->policy != SCHED_IDLE))
6288 * Batch and idle tasks do not preempt non-idle tasks (their preemption
6289 * is driven by the tick):
6291 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
6294 find_matching_se(&se, &pse);
6295 update_curr(cfs_rq_of(se));
6297 if (wakeup_preempt_entity(se, pse) == 1) {
6299 * Bias pick_next to pick the sched entity that is
6300 * triggering this preemption.
6302 if (!next_buddy_marked)
6303 set_next_buddy(pse);
6312 * Only set the backward buddy when the current task is still
6313 * on the rq. This can happen when a wakeup gets interleaved
6314 * with schedule on the ->pre_schedule() or idle_balance()
6315 * point, either of which can * drop the rq lock.
6317 * Also, during early boot the idle thread is in the fair class,
6318 * for obvious reasons its a bad idea to schedule back to it.
6320 if (unlikely(!se->on_rq || curr == rq->idle))
6323 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
6327 static struct task_struct *
6328 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6330 struct cfs_rq *cfs_rq = &rq->cfs;
6331 struct sched_entity *se;
6332 struct task_struct *p;
6336 #ifdef CONFIG_FAIR_GROUP_SCHED
6337 if (!cfs_rq->nr_running)
6340 if (prev->sched_class != &fair_sched_class)
6344 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
6345 * likely that a next task is from the same cgroup as the current.
6347 * Therefore attempt to avoid putting and setting the entire cgroup
6348 * hierarchy, only change the part that actually changes.
6352 struct sched_entity *curr = cfs_rq->curr;
6355 * Since we got here without doing put_prev_entity() we also
6356 * have to consider cfs_rq->curr. If it is still a runnable
6357 * entity, update_curr() will update its vruntime, otherwise
6358 * forget we've ever seen it.
6362 update_curr(cfs_rq);
6367 * This call to check_cfs_rq_runtime() will do the
6368 * throttle and dequeue its entity in the parent(s).
6369 * Therefore the 'simple' nr_running test will indeed
6372 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
6376 se = pick_next_entity(cfs_rq, curr);
6377 cfs_rq = group_cfs_rq(se);
6383 * Since we haven't yet done put_prev_entity and if the selected task
6384 * is a different task than we started out with, try and touch the
6385 * least amount of cfs_rqs.
6388 struct sched_entity *pse = &prev->se;
6390 while (!(cfs_rq = is_same_group(se, pse))) {
6391 int se_depth = se->depth;
6392 int pse_depth = pse->depth;
6394 if (se_depth <= pse_depth) {
6395 put_prev_entity(cfs_rq_of(pse), pse);
6396 pse = parent_entity(pse);
6398 if (se_depth >= pse_depth) {
6399 set_next_entity(cfs_rq_of(se), se);
6400 se = parent_entity(se);
6404 put_prev_entity(cfs_rq, pse);
6405 set_next_entity(cfs_rq, se);
6408 if (hrtick_enabled(rq))
6409 hrtick_start_fair(rq, p);
6416 if (!cfs_rq->nr_running)
6419 put_prev_task(rq, prev);
6422 se = pick_next_entity(cfs_rq, NULL);
6423 set_next_entity(cfs_rq, se);
6424 cfs_rq = group_cfs_rq(se);
6429 if (hrtick_enabled(rq))
6430 hrtick_start_fair(rq, p);
6435 new_tasks = idle_balance(rq, rf);
6438 * Because idle_balance() releases (and re-acquires) rq->lock, it is
6439 * possible for any higher priority task to appear. In that case we
6440 * must re-start the pick_next_entity() loop.
6452 * Account for a descheduled task:
6454 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
6456 struct sched_entity *se = &prev->se;
6457 struct cfs_rq *cfs_rq;
6459 for_each_sched_entity(se) {
6460 cfs_rq = cfs_rq_of(se);
6461 put_prev_entity(cfs_rq, se);
6466 * sched_yield() is very simple
6468 * The magic of dealing with the ->skip buddy is in pick_next_entity.
6470 static void yield_task_fair(struct rq *rq)
6472 struct task_struct *curr = rq->curr;
6473 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6474 struct sched_entity *se = &curr->se;
6477 * Are we the only task in the tree?
6479 if (unlikely(rq->nr_running == 1))
6482 clear_buddies(cfs_rq, se);
6484 if (curr->policy != SCHED_BATCH) {
6485 update_rq_clock(rq);
6487 * Update run-time statistics of the 'current'.
6489 update_curr(cfs_rq);
6491 * Tell update_rq_clock() that we've just updated,
6492 * so we don't do microscopic update in schedule()
6493 * and double the fastpath cost.
6495 rq_clock_skip_update(rq, true);
6501 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
6503 struct sched_entity *se = &p->se;
6505 /* throttled hierarchies are not runnable */
6506 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
6509 /* Tell the scheduler that we'd really like pse to run next. */
6512 yield_task_fair(rq);
6518 /**************************************************
6519 * Fair scheduling class load-balancing methods.
6523 * The purpose of load-balancing is to achieve the same basic fairness the
6524 * per-cpu scheduler provides, namely provide a proportional amount of compute
6525 * time to each task. This is expressed in the following equation:
6527 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
6529 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
6530 * W_i,0 is defined as:
6532 * W_i,0 = \Sum_j w_i,j (2)
6534 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
6535 * is derived from the nice value as per sched_prio_to_weight[].
6537 * The weight average is an exponential decay average of the instantaneous
6540 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
6542 * C_i is the compute capacity of cpu i, typically it is the
6543 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
6544 * can also include other factors [XXX].
6546 * To achieve this balance we define a measure of imbalance which follows
6547 * directly from (1):
6549 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
6551 * We them move tasks around to minimize the imbalance. In the continuous
6552 * function space it is obvious this converges, in the discrete case we get
6553 * a few fun cases generally called infeasible weight scenarios.
6556 * - infeasible weights;
6557 * - local vs global optima in the discrete case. ]
6562 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
6563 * for all i,j solution, we create a tree of cpus that follows the hardware
6564 * topology where each level pairs two lower groups (or better). This results
6565 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
6566 * tree to only the first of the previous level and we decrease the frequency
6567 * of load-balance at each level inv. proportional to the number of cpus in
6573 * \Sum { --- * --- * 2^i } = O(n) (5)
6575 * `- size of each group
6576 * | | `- number of cpus doing load-balance
6578 * `- sum over all levels
6580 * Coupled with a limit on how many tasks we can migrate every balance pass,
6581 * this makes (5) the runtime complexity of the balancer.
6583 * An important property here is that each CPU is still (indirectly) connected
6584 * to every other cpu in at most O(log n) steps:
6586 * The adjacency matrix of the resulting graph is given by:
6589 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
6592 * And you'll find that:
6594 * A^(log_2 n)_i,j != 0 for all i,j (7)
6596 * Showing there's indeed a path between every cpu in at most O(log n) steps.
6597 * The task movement gives a factor of O(m), giving a convergence complexity
6600 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
6605 * In order to avoid CPUs going idle while there's still work to do, new idle
6606 * balancing is more aggressive and has the newly idle cpu iterate up the domain
6607 * tree itself instead of relying on other CPUs to bring it work.
6609 * This adds some complexity to both (5) and (8) but it reduces the total idle
6617 * Cgroups make a horror show out of (2), instead of a simple sum we get:
6620 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
6625 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
6627 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
6629 * The big problem is S_k, its a global sum needed to compute a local (W_i)
6632 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
6633 * rewrite all of this once again.]
6636 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
6638 enum fbq_type { regular, remote, all };
6640 #define LBF_ALL_PINNED 0x01
6641 #define LBF_NEED_BREAK 0x02
6642 #define LBF_DST_PINNED 0x04
6643 #define LBF_SOME_PINNED 0x08
6646 struct sched_domain *sd;
6654 struct cpumask *dst_grpmask;
6656 enum cpu_idle_type idle;
6658 /* The set of CPUs under consideration for load-balancing */
6659 struct cpumask *cpus;
6664 unsigned int loop_break;
6665 unsigned int loop_max;
6667 enum fbq_type fbq_type;
6668 struct list_head tasks;
6672 * Is this task likely cache-hot:
6674 static int task_hot(struct task_struct *p, struct lb_env *env)
6678 lockdep_assert_held(&env->src_rq->lock);
6680 if (p->sched_class != &fair_sched_class)
6683 if (unlikely(p->policy == SCHED_IDLE))
6687 * Buddy candidates are cache hot:
6689 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
6690 (&p->se == cfs_rq_of(&p->se)->next ||
6691 &p->se == cfs_rq_of(&p->se)->last))
6694 if (sysctl_sched_migration_cost == -1)
6696 if (sysctl_sched_migration_cost == 0)
6699 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
6701 return delta < (s64)sysctl_sched_migration_cost;
6704 #ifdef CONFIG_NUMA_BALANCING
6706 * Returns 1, if task migration degrades locality
6707 * Returns 0, if task migration improves locality i.e migration preferred.
6708 * Returns -1, if task migration is not affected by locality.
6710 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
6712 struct numa_group *numa_group = rcu_dereference(p->numa_group);
6713 unsigned long src_faults, dst_faults;
6714 int src_nid, dst_nid;
6716 if (!static_branch_likely(&sched_numa_balancing))
6719 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
6722 src_nid = cpu_to_node(env->src_cpu);
6723 dst_nid = cpu_to_node(env->dst_cpu);
6725 if (src_nid == dst_nid)
6728 /* Migrating away from the preferred node is always bad. */
6729 if (src_nid == p->numa_preferred_nid) {
6730 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
6736 /* Encourage migration to the preferred node. */
6737 if (dst_nid == p->numa_preferred_nid)
6741 src_faults = group_faults(p, src_nid);
6742 dst_faults = group_faults(p, dst_nid);
6744 src_faults = task_faults(p, src_nid);
6745 dst_faults = task_faults(p, dst_nid);
6748 return dst_faults < src_faults;
6752 static inline int migrate_degrades_locality(struct task_struct *p,
6760 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
6763 int can_migrate_task(struct task_struct *p, struct lb_env *env)
6767 lockdep_assert_held(&env->src_rq->lock);
6770 * We do not migrate tasks that are:
6771 * 1) throttled_lb_pair, or
6772 * 2) cannot be migrated to this CPU due to cpus_allowed, or
6773 * 3) running (obviously), or
6774 * 4) are cache-hot on their current CPU.
6776 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
6779 if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
6782 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
6784 env->flags |= LBF_SOME_PINNED;
6787 * Remember if this task can be migrated to any other cpu in
6788 * our sched_group. We may want to revisit it if we couldn't
6789 * meet load balance goals by pulling other tasks on src_cpu.
6791 * Also avoid computing new_dst_cpu if we have already computed
6792 * one in current iteration.
6794 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
6797 /* Prevent to re-select dst_cpu via env's cpus */
6798 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
6799 if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
6800 env->flags |= LBF_DST_PINNED;
6801 env->new_dst_cpu = cpu;
6809 /* Record that we found atleast one task that could run on dst_cpu */
6810 env->flags &= ~LBF_ALL_PINNED;
6812 if (task_running(env->src_rq, p)) {
6813 schedstat_inc(p->se.statistics.nr_failed_migrations_running);
6818 * Aggressive migration if:
6819 * 1) destination numa is preferred
6820 * 2) task is cache cold, or
6821 * 3) too many balance attempts have failed.
6823 tsk_cache_hot = migrate_degrades_locality(p, env);
6824 if (tsk_cache_hot == -1)
6825 tsk_cache_hot = task_hot(p, env);
6827 if (tsk_cache_hot <= 0 ||
6828 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
6829 if (tsk_cache_hot == 1) {
6830 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
6831 schedstat_inc(p->se.statistics.nr_forced_migrations);
6836 schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
6841 * detach_task() -- detach the task for the migration specified in env
6843 static void detach_task(struct task_struct *p, struct lb_env *env)
6845 lockdep_assert_held(&env->src_rq->lock);
6847 p->on_rq = TASK_ON_RQ_MIGRATING;
6848 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
6849 set_task_cpu(p, env->dst_cpu);
6853 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
6854 * part of active balancing operations within "domain".
6856 * Returns a task if successful and NULL otherwise.
6858 static struct task_struct *detach_one_task(struct lb_env *env)
6860 struct task_struct *p, *n;
6862 lockdep_assert_held(&env->src_rq->lock);
6864 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
6865 if (!can_migrate_task(p, env))
6868 detach_task(p, env);
6871 * Right now, this is only the second place where
6872 * lb_gained[env->idle] is updated (other is detach_tasks)
6873 * so we can safely collect stats here rather than
6874 * inside detach_tasks().
6876 schedstat_inc(env->sd->lb_gained[env->idle]);
6882 static const unsigned int sched_nr_migrate_break = 32;
6885 * detach_tasks() -- tries to detach up to imbalance weighted load from
6886 * busiest_rq, as part of a balancing operation within domain "sd".
6888 * Returns number of detached tasks if successful and 0 otherwise.
6890 static int detach_tasks(struct lb_env *env)
6892 struct list_head *tasks = &env->src_rq->cfs_tasks;
6893 struct task_struct *p;
6897 lockdep_assert_held(&env->src_rq->lock);
6899 if (env->imbalance <= 0)
6902 while (!list_empty(tasks)) {
6904 * We don't want to steal all, otherwise we may be treated likewise,
6905 * which could at worst lead to a livelock crash.
6907 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
6910 p = list_first_entry(tasks, struct task_struct, se.group_node);
6913 /* We've more or less seen every task there is, call it quits */
6914 if (env->loop > env->loop_max)
6917 /* take a breather every nr_migrate tasks */
6918 if (env->loop > env->loop_break) {
6919 env->loop_break += sched_nr_migrate_break;
6920 env->flags |= LBF_NEED_BREAK;
6924 if (!can_migrate_task(p, env))
6927 load = task_h_load(p);
6929 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
6932 if ((load / 2) > env->imbalance)
6935 detach_task(p, env);
6936 list_add(&p->se.group_node, &env->tasks);
6939 env->imbalance -= load;
6941 #ifdef CONFIG_PREEMPT
6943 * NEWIDLE balancing is a source of latency, so preemptible
6944 * kernels will stop after the first task is detached to minimize
6945 * the critical section.
6947 if (env->idle == CPU_NEWLY_IDLE)
6952 * We only want to steal up to the prescribed amount of
6955 if (env->imbalance <= 0)
6960 list_move_tail(&p->se.group_node, tasks);
6964 * Right now, this is one of only two places we collect this stat
6965 * so we can safely collect detach_one_task() stats here rather
6966 * than inside detach_one_task().
6968 schedstat_add(env->sd->lb_gained[env->idle], detached);
6974 * attach_task() -- attach the task detached by detach_task() to its new rq.
6976 static void attach_task(struct rq *rq, struct task_struct *p)
6978 lockdep_assert_held(&rq->lock);
6980 BUG_ON(task_rq(p) != rq);
6981 activate_task(rq, p, ENQUEUE_NOCLOCK);
6982 p->on_rq = TASK_ON_RQ_QUEUED;
6983 check_preempt_curr(rq, p, 0);
6987 * attach_one_task() -- attaches the task returned from detach_one_task() to
6990 static void attach_one_task(struct rq *rq, struct task_struct *p)
6995 update_rq_clock(rq);
7001 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
7004 static void attach_tasks(struct lb_env *env)
7006 struct list_head *tasks = &env->tasks;
7007 struct task_struct *p;
7010 rq_lock(env->dst_rq, &rf);
7011 update_rq_clock(env->dst_rq);
7013 while (!list_empty(tasks)) {
7014 p = list_first_entry(tasks, struct task_struct, se.group_node);
7015 list_del_init(&p->se.group_node);
7017 attach_task(env->dst_rq, p);
7020 rq_unlock(env->dst_rq, &rf);
7023 #ifdef CONFIG_FAIR_GROUP_SCHED
7024 static void update_blocked_averages(int cpu)
7026 struct rq *rq = cpu_rq(cpu);
7027 struct cfs_rq *cfs_rq;
7030 rq_lock_irqsave(rq, &rf);
7031 update_rq_clock(rq);
7034 * Iterates the task_group tree in a bottom up fashion, see
7035 * list_add_leaf_cfs_rq() for details.
7037 for_each_leaf_cfs_rq(rq, cfs_rq) {
7038 struct sched_entity *se;
7040 /* throttled entities do not contribute to load */
7041 if (throttled_hierarchy(cfs_rq))
7044 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
7045 update_tg_load_avg(cfs_rq, 0);
7047 /* Propagate pending load changes to the parent, if any: */
7048 se = cfs_rq->tg->se[cpu];
7049 if (se && !skip_blocked_update(se))
7050 update_load_avg(se, 0);
7052 rq_unlock_irqrestore(rq, &rf);
7056 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
7057 * This needs to be done in a top-down fashion because the load of a child
7058 * group is a fraction of its parents load.
7060 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
7062 struct rq *rq = rq_of(cfs_rq);
7063 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
7064 unsigned long now = jiffies;
7067 if (cfs_rq->last_h_load_update == now)
7070 cfs_rq->h_load_next = NULL;
7071 for_each_sched_entity(se) {
7072 cfs_rq = cfs_rq_of(se);
7073 cfs_rq->h_load_next = se;
7074 if (cfs_rq->last_h_load_update == now)
7079 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
7080 cfs_rq->last_h_load_update = now;
7083 while ((se = cfs_rq->h_load_next) != NULL) {
7084 load = cfs_rq->h_load;
7085 load = div64_ul(load * se->avg.load_avg,
7086 cfs_rq_load_avg(cfs_rq) + 1);
7087 cfs_rq = group_cfs_rq(se);
7088 cfs_rq->h_load = load;
7089 cfs_rq->last_h_load_update = now;
7093 static unsigned long task_h_load(struct task_struct *p)
7095 struct cfs_rq *cfs_rq = task_cfs_rq(p);
7097 update_cfs_rq_h_load(cfs_rq);
7098 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
7099 cfs_rq_load_avg(cfs_rq) + 1);
7102 static inline void update_blocked_averages(int cpu)
7104 struct rq *rq = cpu_rq(cpu);
7105 struct cfs_rq *cfs_rq = &rq->cfs;
7108 rq_lock_irqsave(rq, &rf);
7109 update_rq_clock(rq);
7110 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
7111 rq_unlock_irqrestore(rq, &rf);
7114 static unsigned long task_h_load(struct task_struct *p)
7116 return p->se.avg.load_avg;
7120 /********** Helpers for find_busiest_group ************************/
7129 * sg_lb_stats - stats of a sched_group required for load_balancing
7131 struct sg_lb_stats {
7132 unsigned long avg_load; /*Avg load across the CPUs of the group */
7133 unsigned long group_load; /* Total load over the CPUs of the group */
7134 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
7135 unsigned long load_per_task;
7136 unsigned long group_capacity;
7137 unsigned long group_util; /* Total utilization of the group */
7138 unsigned int sum_nr_running; /* Nr tasks running in the group */
7139 unsigned int idle_cpus;
7140 unsigned int group_weight;
7141 enum group_type group_type;
7142 int group_no_capacity;
7143 #ifdef CONFIG_NUMA_BALANCING
7144 unsigned int nr_numa_running;
7145 unsigned int nr_preferred_running;
7150 * sd_lb_stats - Structure to store the statistics of a sched_domain
7151 * during load balancing.
7153 struct sd_lb_stats {
7154 struct sched_group *busiest; /* Busiest group in this sd */
7155 struct sched_group *local; /* Local group in this sd */
7156 unsigned long total_load; /* Total load of all groups in sd */
7157 unsigned long total_capacity; /* Total capacity of all groups in sd */
7158 unsigned long avg_load; /* Average load across all groups in sd */
7160 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
7161 struct sg_lb_stats local_stat; /* Statistics of the local group */
7164 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
7167 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
7168 * local_stat because update_sg_lb_stats() does a full clear/assignment.
7169 * We must however clear busiest_stat::avg_load because
7170 * update_sd_pick_busiest() reads this before assignment.
7172 *sds = (struct sd_lb_stats){
7176 .total_capacity = 0UL,
7179 .sum_nr_running = 0,
7180 .group_type = group_other,
7186 * get_sd_load_idx - Obtain the load index for a given sched domain.
7187 * @sd: The sched_domain whose load_idx is to be obtained.
7188 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
7190 * Return: The load index.
7192 static inline int get_sd_load_idx(struct sched_domain *sd,
7193 enum cpu_idle_type idle)
7199 load_idx = sd->busy_idx;
7202 case CPU_NEWLY_IDLE:
7203 load_idx = sd->newidle_idx;
7206 load_idx = sd->idle_idx;
7213 static unsigned long scale_rt_capacity(int cpu)
7215 struct rq *rq = cpu_rq(cpu);
7216 u64 total, used, age_stamp, avg;
7220 * Since we're reading these variables without serialization make sure
7221 * we read them once before doing sanity checks on them.
7223 age_stamp = READ_ONCE(rq->age_stamp);
7224 avg = READ_ONCE(rq->rt_avg);
7225 delta = __rq_clock_broken(rq) - age_stamp;
7227 if (unlikely(delta < 0))
7230 total = sched_avg_period() + delta;
7232 used = div_u64(avg, total);
7234 if (likely(used < SCHED_CAPACITY_SCALE))
7235 return SCHED_CAPACITY_SCALE - used;
7240 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
7242 unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
7243 struct sched_group *sdg = sd->groups;
7245 cpu_rq(cpu)->cpu_capacity_orig = capacity;
7247 capacity *= scale_rt_capacity(cpu);
7248 capacity >>= SCHED_CAPACITY_SHIFT;
7253 cpu_rq(cpu)->cpu_capacity = capacity;
7254 sdg->sgc->capacity = capacity;
7255 sdg->sgc->min_capacity = capacity;
7258 void update_group_capacity(struct sched_domain *sd, int cpu)
7260 struct sched_domain *child = sd->child;
7261 struct sched_group *group, *sdg = sd->groups;
7262 unsigned long capacity, min_capacity;
7263 unsigned long interval;
7265 interval = msecs_to_jiffies(sd->balance_interval);
7266 interval = clamp(interval, 1UL, max_load_balance_interval);
7267 sdg->sgc->next_update = jiffies + interval;
7270 update_cpu_capacity(sd, cpu);
7275 min_capacity = ULONG_MAX;
7277 if (child->flags & SD_OVERLAP) {
7279 * SD_OVERLAP domains cannot assume that child groups
7280 * span the current group.
7283 for_each_cpu(cpu, sched_group_cpus(sdg)) {
7284 struct sched_group_capacity *sgc;
7285 struct rq *rq = cpu_rq(cpu);
7288 * build_sched_domains() -> init_sched_groups_capacity()
7289 * gets here before we've attached the domains to the
7292 * Use capacity_of(), which is set irrespective of domains
7293 * in update_cpu_capacity().
7295 * This avoids capacity from being 0 and
7296 * causing divide-by-zero issues on boot.
7298 if (unlikely(!rq->sd)) {
7299 capacity += capacity_of(cpu);
7301 sgc = rq->sd->groups->sgc;
7302 capacity += sgc->capacity;
7305 min_capacity = min(capacity, min_capacity);
7309 * !SD_OVERLAP domains can assume that child groups
7310 * span the current group.
7313 group = child->groups;
7315 struct sched_group_capacity *sgc = group->sgc;
7317 capacity += sgc->capacity;
7318 min_capacity = min(sgc->min_capacity, min_capacity);
7319 group = group->next;
7320 } while (group != child->groups);
7323 sdg->sgc->capacity = capacity;
7324 sdg->sgc->min_capacity = min_capacity;
7328 * Check whether the capacity of the rq has been noticeably reduced by side
7329 * activity. The imbalance_pct is used for the threshold.
7330 * Return true is the capacity is reduced
7333 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
7335 return ((rq->cpu_capacity * sd->imbalance_pct) <
7336 (rq->cpu_capacity_orig * 100));
7340 * Group imbalance indicates (and tries to solve) the problem where balancing
7341 * groups is inadequate due to ->cpus_allowed constraints.
7343 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
7344 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
7347 * { 0 1 2 3 } { 4 5 6 7 }
7350 * If we were to balance group-wise we'd place two tasks in the first group and
7351 * two tasks in the second group. Clearly this is undesired as it will overload
7352 * cpu 3 and leave one of the cpus in the second group unused.
7354 * The current solution to this issue is detecting the skew in the first group
7355 * by noticing the lower domain failed to reach balance and had difficulty
7356 * moving tasks due to affinity constraints.
7358 * When this is so detected; this group becomes a candidate for busiest; see
7359 * update_sd_pick_busiest(). And calculate_imbalance() and
7360 * find_busiest_group() avoid some of the usual balance conditions to allow it
7361 * to create an effective group imbalance.
7363 * This is a somewhat tricky proposition since the next run might not find the
7364 * group imbalance and decide the groups need to be balanced again. A most
7365 * subtle and fragile situation.
7368 static inline int sg_imbalanced(struct sched_group *group)
7370 return group->sgc->imbalance;
7374 * group_has_capacity returns true if the group has spare capacity that could
7375 * be used by some tasks.
7376 * We consider that a group has spare capacity if the * number of task is
7377 * smaller than the number of CPUs or if the utilization is lower than the
7378 * available capacity for CFS tasks.
7379 * For the latter, we use a threshold to stabilize the state, to take into
7380 * account the variance of the tasks' load and to return true if the available
7381 * capacity in meaningful for the load balancer.
7382 * As an example, an available capacity of 1% can appear but it doesn't make
7383 * any benefit for the load balance.
7386 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
7388 if (sgs->sum_nr_running < sgs->group_weight)
7391 if ((sgs->group_capacity * 100) >
7392 (sgs->group_util * env->sd->imbalance_pct))
7399 * group_is_overloaded returns true if the group has more tasks than it can
7401 * group_is_overloaded is not equals to !group_has_capacity because a group
7402 * with the exact right number of tasks, has no more spare capacity but is not
7403 * overloaded so both group_has_capacity and group_is_overloaded return
7407 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
7409 if (sgs->sum_nr_running <= sgs->group_weight)
7412 if ((sgs->group_capacity * 100) <
7413 (sgs->group_util * env->sd->imbalance_pct))
7420 * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
7421 * per-CPU capacity than sched_group ref.
7424 group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
7426 return sg->sgc->min_capacity * capacity_margin <
7427 ref->sgc->min_capacity * 1024;
7431 group_type group_classify(struct sched_group *group,
7432 struct sg_lb_stats *sgs)
7434 if (sgs->group_no_capacity)
7435 return group_overloaded;
7437 if (sg_imbalanced(group))
7438 return group_imbalanced;
7444 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
7445 * @env: The load balancing environment.
7446 * @group: sched_group whose statistics are to be updated.
7447 * @load_idx: Load index of sched_domain of this_cpu for load calc.
7448 * @local_group: Does group contain this_cpu.
7449 * @sgs: variable to hold the statistics for this group.
7450 * @overload: Indicate more than one runnable task for any CPU.
7452 static inline void update_sg_lb_stats(struct lb_env *env,
7453 struct sched_group *group, int load_idx,
7454 int local_group, struct sg_lb_stats *sgs,
7460 memset(sgs, 0, sizeof(*sgs));
7462 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7463 struct rq *rq = cpu_rq(i);
7465 /* Bias balancing toward cpus of our domain */
7467 load = target_load(i, load_idx);
7469 load = source_load(i, load_idx);
7471 sgs->group_load += load;
7472 sgs->group_util += cpu_util(i);
7473 sgs->sum_nr_running += rq->cfs.h_nr_running;
7475 nr_running = rq->nr_running;
7479 #ifdef CONFIG_NUMA_BALANCING
7480 sgs->nr_numa_running += rq->nr_numa_running;
7481 sgs->nr_preferred_running += rq->nr_preferred_running;
7483 sgs->sum_weighted_load += weighted_cpuload(i);
7485 * No need to call idle_cpu() if nr_running is not 0
7487 if (!nr_running && idle_cpu(i))
7491 /* Adjust by relative CPU capacity of the group */
7492 sgs->group_capacity = group->sgc->capacity;
7493 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
7495 if (sgs->sum_nr_running)
7496 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
7498 sgs->group_weight = group->group_weight;
7500 sgs->group_no_capacity = group_is_overloaded(env, sgs);
7501 sgs->group_type = group_classify(group, sgs);
7505 * update_sd_pick_busiest - return 1 on busiest group
7506 * @env: The load balancing environment.
7507 * @sds: sched_domain statistics
7508 * @sg: sched_group candidate to be checked for being the busiest
7509 * @sgs: sched_group statistics
7511 * Determine if @sg is a busier group than the previously selected
7514 * Return: %true if @sg is a busier group than the previously selected
7515 * busiest group. %false otherwise.
7517 static bool update_sd_pick_busiest(struct lb_env *env,
7518 struct sd_lb_stats *sds,
7519 struct sched_group *sg,
7520 struct sg_lb_stats *sgs)
7522 struct sg_lb_stats *busiest = &sds->busiest_stat;
7524 if (sgs->group_type > busiest->group_type)
7527 if (sgs->group_type < busiest->group_type)
7530 if (sgs->avg_load <= busiest->avg_load)
7533 if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
7537 * Candidate sg has no more than one task per CPU and
7538 * has higher per-CPU capacity. Migrating tasks to less
7539 * capable CPUs may harm throughput. Maximize throughput,
7540 * power/energy consequences are not considered.
7542 if (sgs->sum_nr_running <= sgs->group_weight &&
7543 group_smaller_cpu_capacity(sds->local, sg))
7547 /* This is the busiest node in its class. */
7548 if (!(env->sd->flags & SD_ASYM_PACKING))
7551 /* No ASYM_PACKING if target cpu is already busy */
7552 if (env->idle == CPU_NOT_IDLE)
7555 * ASYM_PACKING needs to move all the work to the highest
7556 * prority CPUs in the group, therefore mark all groups
7557 * of lower priority than ourself as busy.
7559 if (sgs->sum_nr_running &&
7560 sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
7564 /* Prefer to move from lowest priority cpu's work */
7565 if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
7566 sg->asym_prefer_cpu))
7573 #ifdef CONFIG_NUMA_BALANCING
7574 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7576 if (sgs->sum_nr_running > sgs->nr_numa_running)
7578 if (sgs->sum_nr_running > sgs->nr_preferred_running)
7583 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7585 if (rq->nr_running > rq->nr_numa_running)
7587 if (rq->nr_running > rq->nr_preferred_running)
7592 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7597 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7601 #endif /* CONFIG_NUMA_BALANCING */
7604 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
7605 * @env: The load balancing environment.
7606 * @sds: variable to hold the statistics for this sched_domain.
7608 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
7610 struct sched_domain *child = env->sd->child;
7611 struct sched_group *sg = env->sd->groups;
7612 struct sg_lb_stats *local = &sds->local_stat;
7613 struct sg_lb_stats tmp_sgs;
7614 int load_idx, prefer_sibling = 0;
7615 bool overload = false;
7617 if (child && child->flags & SD_PREFER_SIBLING)
7620 load_idx = get_sd_load_idx(env->sd, env->idle);
7623 struct sg_lb_stats *sgs = &tmp_sgs;
7626 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
7631 if (env->idle != CPU_NEWLY_IDLE ||
7632 time_after_eq(jiffies, sg->sgc->next_update))
7633 update_group_capacity(env->sd, env->dst_cpu);
7636 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
7643 * In case the child domain prefers tasks go to siblings
7644 * first, lower the sg capacity so that we'll try
7645 * and move all the excess tasks away. We lower the capacity
7646 * of a group only if the local group has the capacity to fit
7647 * these excess tasks. The extra check prevents the case where
7648 * you always pull from the heaviest group when it is already
7649 * under-utilized (possible with a large weight task outweighs
7650 * the tasks on the system).
7652 if (prefer_sibling && sds->local &&
7653 group_has_capacity(env, local) &&
7654 (sgs->sum_nr_running > local->sum_nr_running + 1)) {
7655 sgs->group_no_capacity = 1;
7656 sgs->group_type = group_classify(sg, sgs);
7659 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
7661 sds->busiest_stat = *sgs;
7665 /* Now, start updating sd_lb_stats */
7666 sds->total_load += sgs->group_load;
7667 sds->total_capacity += sgs->group_capacity;
7670 } while (sg != env->sd->groups);
7672 if (env->sd->flags & SD_NUMA)
7673 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
7675 if (!env->sd->parent) {
7676 /* update overload indicator if we are at root domain */
7677 if (env->dst_rq->rd->overload != overload)
7678 env->dst_rq->rd->overload = overload;
7684 * check_asym_packing - Check to see if the group is packed into the
7687 * This is primarily intended to used at the sibling level. Some
7688 * cores like POWER7 prefer to use lower numbered SMT threads. In the
7689 * case of POWER7, it can move to lower SMT modes only when higher
7690 * threads are idle. When in lower SMT modes, the threads will
7691 * perform better since they share less core resources. Hence when we
7692 * have idle threads, we want them to be the higher ones.
7694 * This packing function is run on idle threads. It checks to see if
7695 * the busiest CPU in this domain (core in the P7 case) has a higher
7696 * CPU number than the packing function is being run on. Here we are
7697 * assuming lower CPU number will be equivalent to lower a SMT thread
7700 * Return: 1 when packing is required and a task should be moved to
7701 * this CPU. The amount of the imbalance is returned in *imbalance.
7703 * @env: The load balancing environment.
7704 * @sds: Statistics of the sched_domain which is to be packed
7706 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
7710 if (!(env->sd->flags & SD_ASYM_PACKING))
7713 if (env->idle == CPU_NOT_IDLE)
7719 busiest_cpu = sds->busiest->asym_prefer_cpu;
7720 if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
7723 env->imbalance = DIV_ROUND_CLOSEST(
7724 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
7725 SCHED_CAPACITY_SCALE);
7731 * fix_small_imbalance - Calculate the minor imbalance that exists
7732 * amongst the groups of a sched_domain, during
7734 * @env: The load balancing environment.
7735 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
7738 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7740 unsigned long tmp, capa_now = 0, capa_move = 0;
7741 unsigned int imbn = 2;
7742 unsigned long scaled_busy_load_per_task;
7743 struct sg_lb_stats *local, *busiest;
7745 local = &sds->local_stat;
7746 busiest = &sds->busiest_stat;
7748 if (!local->sum_nr_running)
7749 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
7750 else if (busiest->load_per_task > local->load_per_task)
7753 scaled_busy_load_per_task =
7754 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7755 busiest->group_capacity;
7757 if (busiest->avg_load + scaled_busy_load_per_task >=
7758 local->avg_load + (scaled_busy_load_per_task * imbn)) {
7759 env->imbalance = busiest->load_per_task;
7764 * OK, we don't have enough imbalance to justify moving tasks,
7765 * however we may be able to increase total CPU capacity used by
7769 capa_now += busiest->group_capacity *
7770 min(busiest->load_per_task, busiest->avg_load);
7771 capa_now += local->group_capacity *
7772 min(local->load_per_task, local->avg_load);
7773 capa_now /= SCHED_CAPACITY_SCALE;
7775 /* Amount of load we'd subtract */
7776 if (busiest->avg_load > scaled_busy_load_per_task) {
7777 capa_move += busiest->group_capacity *
7778 min(busiest->load_per_task,
7779 busiest->avg_load - scaled_busy_load_per_task);
7782 /* Amount of load we'd add */
7783 if (busiest->avg_load * busiest->group_capacity <
7784 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
7785 tmp = (busiest->avg_load * busiest->group_capacity) /
7786 local->group_capacity;
7788 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7789 local->group_capacity;
7791 capa_move += local->group_capacity *
7792 min(local->load_per_task, local->avg_load + tmp);
7793 capa_move /= SCHED_CAPACITY_SCALE;
7795 /* Move if we gain throughput */
7796 if (capa_move > capa_now)
7797 env->imbalance = busiest->load_per_task;
7801 * calculate_imbalance - Calculate the amount of imbalance present within the
7802 * groups of a given sched_domain during load balance.
7803 * @env: load balance environment
7804 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
7806 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7808 unsigned long max_pull, load_above_capacity = ~0UL;
7809 struct sg_lb_stats *local, *busiest;
7811 local = &sds->local_stat;
7812 busiest = &sds->busiest_stat;
7814 if (busiest->group_type == group_imbalanced) {
7816 * In the group_imb case we cannot rely on group-wide averages
7817 * to ensure cpu-load equilibrium, look at wider averages. XXX
7819 busiest->load_per_task =
7820 min(busiest->load_per_task, sds->avg_load);
7824 * Avg load of busiest sg can be less and avg load of local sg can
7825 * be greater than avg load across all sgs of sd because avg load
7826 * factors in sg capacity and sgs with smaller group_type are
7827 * skipped when updating the busiest sg:
7829 if (busiest->avg_load <= sds->avg_load ||
7830 local->avg_load >= sds->avg_load) {
7832 return fix_small_imbalance(env, sds);
7836 * If there aren't any idle cpus, avoid creating some.
7838 if (busiest->group_type == group_overloaded &&
7839 local->group_type == group_overloaded) {
7840 load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
7841 if (load_above_capacity > busiest->group_capacity) {
7842 load_above_capacity -= busiest->group_capacity;
7843 load_above_capacity *= scale_load_down(NICE_0_LOAD);
7844 load_above_capacity /= busiest->group_capacity;
7846 load_above_capacity = ~0UL;
7850 * We're trying to get all the cpus to the average_load, so we don't
7851 * want to push ourselves above the average load, nor do we wish to
7852 * reduce the max loaded cpu below the average load. At the same time,
7853 * we also don't want to reduce the group load below the group
7854 * capacity. Thus we look for the minimum possible imbalance.
7856 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
7858 /* How much load to actually move to equalise the imbalance */
7859 env->imbalance = min(
7860 max_pull * busiest->group_capacity,
7861 (sds->avg_load - local->avg_load) * local->group_capacity
7862 ) / SCHED_CAPACITY_SCALE;
7865 * if *imbalance is less than the average load per runnable task
7866 * there is no guarantee that any tasks will be moved so we'll have
7867 * a think about bumping its value to force at least one task to be
7870 if (env->imbalance < busiest->load_per_task)
7871 return fix_small_imbalance(env, sds);
7874 /******* find_busiest_group() helpers end here *********************/
7877 * find_busiest_group - Returns the busiest group within the sched_domain
7878 * if there is an imbalance.
7880 * Also calculates the amount of weighted load which should be moved
7881 * to restore balance.
7883 * @env: The load balancing environment.
7885 * Return: - The busiest group if imbalance exists.
7887 static struct sched_group *find_busiest_group(struct lb_env *env)
7889 struct sg_lb_stats *local, *busiest;
7890 struct sd_lb_stats sds;
7892 init_sd_lb_stats(&sds);
7895 * Compute the various statistics relavent for load balancing at
7898 update_sd_lb_stats(env, &sds);
7899 local = &sds.local_stat;
7900 busiest = &sds.busiest_stat;
7902 /* ASYM feature bypasses nice load balance check */
7903 if (check_asym_packing(env, &sds))
7906 /* There is no busy sibling group to pull tasks from */
7907 if (!sds.busiest || busiest->sum_nr_running == 0)
7910 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
7911 / sds.total_capacity;
7914 * If the busiest group is imbalanced the below checks don't
7915 * work because they assume all things are equal, which typically
7916 * isn't true due to cpus_allowed constraints and the like.
7918 if (busiest->group_type == group_imbalanced)
7921 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
7922 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
7923 busiest->group_no_capacity)
7927 * If the local group is busier than the selected busiest group
7928 * don't try and pull any tasks.
7930 if (local->avg_load >= busiest->avg_load)
7934 * Don't pull any tasks if this group is already above the domain
7937 if (local->avg_load >= sds.avg_load)
7940 if (env->idle == CPU_IDLE) {
7942 * This cpu is idle. If the busiest group is not overloaded
7943 * and there is no imbalance between this and busiest group
7944 * wrt idle cpus, it is balanced. The imbalance becomes
7945 * significant if the diff is greater than 1 otherwise we
7946 * might end up to just move the imbalance on another group
7948 if ((busiest->group_type != group_overloaded) &&
7949 (local->idle_cpus <= (busiest->idle_cpus + 1)))
7953 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
7954 * imbalance_pct to be conservative.
7956 if (100 * busiest->avg_load <=
7957 env->sd->imbalance_pct * local->avg_load)
7962 /* Looks like there is an imbalance. Compute it */
7963 calculate_imbalance(env, &sds);
7972 * find_busiest_queue - find the busiest runqueue among the cpus in group.
7974 static struct rq *find_busiest_queue(struct lb_env *env,
7975 struct sched_group *group)
7977 struct rq *busiest = NULL, *rq;
7978 unsigned long busiest_load = 0, busiest_capacity = 1;
7981 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7982 unsigned long capacity, wl;
7986 rt = fbq_classify_rq(rq);
7989 * We classify groups/runqueues into three groups:
7990 * - regular: there are !numa tasks
7991 * - remote: there are numa tasks that run on the 'wrong' node
7992 * - all: there is no distinction
7994 * In order to avoid migrating ideally placed numa tasks,
7995 * ignore those when there's better options.
7997 * If we ignore the actual busiest queue to migrate another
7998 * task, the next balance pass can still reduce the busiest
7999 * queue by moving tasks around inside the node.
8001 * If we cannot move enough load due to this classification
8002 * the next pass will adjust the group classification and
8003 * allow migration of more tasks.
8005 * Both cases only affect the total convergence complexity.
8007 if (rt > env->fbq_type)
8010 capacity = capacity_of(i);
8012 wl = weighted_cpuload(i);
8015 * When comparing with imbalance, use weighted_cpuload()
8016 * which is not scaled with the cpu capacity.
8019 if (rq->nr_running == 1 && wl > env->imbalance &&
8020 !check_cpu_capacity(rq, env->sd))
8024 * For the load comparisons with the other cpu's, consider
8025 * the weighted_cpuload() scaled with the cpu capacity, so
8026 * that the load can be moved away from the cpu that is
8027 * potentially running at a lower capacity.
8029 * Thus we're looking for max(wl_i / capacity_i), crosswise
8030 * multiplication to rid ourselves of the division works out
8031 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
8032 * our previous maximum.
8034 if (wl * busiest_capacity > busiest_load * capacity) {
8036 busiest_capacity = capacity;
8045 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
8046 * so long as it is large enough.
8048 #define MAX_PINNED_INTERVAL 512
8050 static int need_active_balance(struct lb_env *env)
8052 struct sched_domain *sd = env->sd;
8054 if (env->idle == CPU_NEWLY_IDLE) {
8057 * ASYM_PACKING needs to force migrate tasks from busy but
8058 * lower priority CPUs in order to pack all tasks in the
8059 * highest priority CPUs.
8061 if ((sd->flags & SD_ASYM_PACKING) &&
8062 sched_asym_prefer(env->dst_cpu, env->src_cpu))
8067 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
8068 * It's worth migrating the task if the src_cpu's capacity is reduced
8069 * because of other sched_class or IRQs if more capacity stays
8070 * available on dst_cpu.
8072 if ((env->idle != CPU_NOT_IDLE) &&
8073 (env->src_rq->cfs.h_nr_running == 1)) {
8074 if ((check_cpu_capacity(env->src_rq, sd)) &&
8075 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
8079 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
8082 static int active_load_balance_cpu_stop(void *data);
8084 static int should_we_balance(struct lb_env *env)
8086 struct sched_group *sg = env->sd->groups;
8087 struct cpumask *sg_cpus, *sg_mask;
8088 int cpu, balance_cpu = -1;
8091 * In the newly idle case, we will allow all the cpu's
8092 * to do the newly idle load balance.
8094 if (env->idle == CPU_NEWLY_IDLE)
8097 sg_cpus = sched_group_cpus(sg);
8098 sg_mask = sched_group_mask(sg);
8099 /* Try to find first idle cpu */
8100 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
8101 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
8108 if (balance_cpu == -1)
8109 balance_cpu = group_balance_cpu(sg);
8112 * First idle cpu or the first cpu(busiest) in this sched group
8113 * is eligible for doing load balancing at this and above domains.
8115 return balance_cpu == env->dst_cpu;
8119 * Check this_cpu to ensure it is balanced within domain. Attempt to move
8120 * tasks if there is an imbalance.
8122 static int load_balance(int this_cpu, struct rq *this_rq,
8123 struct sched_domain *sd, enum cpu_idle_type idle,
8124 int *continue_balancing)
8126 int ld_moved, cur_ld_moved, active_balance = 0;
8127 struct sched_domain *sd_parent = sd->parent;
8128 struct sched_group *group;
8131 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
8133 struct lb_env env = {
8135 .dst_cpu = this_cpu,
8137 .dst_grpmask = sched_group_cpus(sd->groups),
8139 .loop_break = sched_nr_migrate_break,
8142 .tasks = LIST_HEAD_INIT(env.tasks),
8146 * For NEWLY_IDLE load_balancing, we don't need to consider
8147 * other cpus in our group
8149 if (idle == CPU_NEWLY_IDLE)
8150 env.dst_grpmask = NULL;
8152 cpumask_copy(cpus, cpu_active_mask);
8154 schedstat_inc(sd->lb_count[idle]);
8157 if (!should_we_balance(&env)) {
8158 *continue_balancing = 0;
8162 group = find_busiest_group(&env);
8164 schedstat_inc(sd->lb_nobusyg[idle]);
8168 busiest = find_busiest_queue(&env, group);
8170 schedstat_inc(sd->lb_nobusyq[idle]);
8174 BUG_ON(busiest == env.dst_rq);
8176 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
8178 env.src_cpu = busiest->cpu;
8179 env.src_rq = busiest;
8182 if (busiest->nr_running > 1) {
8184 * Attempt to move tasks. If find_busiest_group has found
8185 * an imbalance but busiest->nr_running <= 1, the group is
8186 * still unbalanced. ld_moved simply stays zero, so it is
8187 * correctly treated as an imbalance.
8189 env.flags |= LBF_ALL_PINNED;
8190 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8193 rq_lock_irqsave(busiest, &rf);
8194 update_rq_clock(busiest);
8197 * cur_ld_moved - load moved in current iteration
8198 * ld_moved - cumulative load moved across iterations
8200 cur_ld_moved = detach_tasks(&env);
8203 * We've detached some tasks from busiest_rq. Every
8204 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
8205 * unlock busiest->lock, and we are able to be sure
8206 * that nobody can manipulate the tasks in parallel.
8207 * See task_rq_lock() family for the details.
8210 rq_unlock(busiest, &rf);
8214 ld_moved += cur_ld_moved;
8217 local_irq_restore(rf.flags);
8219 if (env.flags & LBF_NEED_BREAK) {
8220 env.flags &= ~LBF_NEED_BREAK;
8225 * Revisit (affine) tasks on src_cpu that couldn't be moved to
8226 * us and move them to an alternate dst_cpu in our sched_group
8227 * where they can run. The upper limit on how many times we
8228 * iterate on same src_cpu is dependent on number of cpus in our
8231 * This changes load balance semantics a bit on who can move
8232 * load to a given_cpu. In addition to the given_cpu itself
8233 * (or a ilb_cpu acting on its behalf where given_cpu is
8234 * nohz-idle), we now have balance_cpu in a position to move
8235 * load to given_cpu. In rare situations, this may cause
8236 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
8237 * _independently_ and at _same_ time to move some load to
8238 * given_cpu) causing exceess load to be moved to given_cpu.
8239 * This however should not happen so much in practice and
8240 * moreover subsequent load balance cycles should correct the
8241 * excess load moved.
8243 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
8245 /* Prevent to re-select dst_cpu via env's cpus */
8246 cpumask_clear_cpu(env.dst_cpu, env.cpus);
8248 env.dst_rq = cpu_rq(env.new_dst_cpu);
8249 env.dst_cpu = env.new_dst_cpu;
8250 env.flags &= ~LBF_DST_PINNED;
8252 env.loop_break = sched_nr_migrate_break;
8255 * Go back to "more_balance" rather than "redo" since we
8256 * need to continue with same src_cpu.
8262 * We failed to reach balance because of affinity.
8265 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
8267 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
8268 *group_imbalance = 1;
8271 /* All tasks on this runqueue were pinned by CPU affinity */
8272 if (unlikely(env.flags & LBF_ALL_PINNED)) {
8273 cpumask_clear_cpu(cpu_of(busiest), cpus);
8274 if (!cpumask_empty(cpus)) {
8276 env.loop_break = sched_nr_migrate_break;
8279 goto out_all_pinned;
8284 schedstat_inc(sd->lb_failed[idle]);
8286 * Increment the failure counter only on periodic balance.
8287 * We do not want newidle balance, which can be very
8288 * frequent, pollute the failure counter causing
8289 * excessive cache_hot migrations and active balances.
8291 if (idle != CPU_NEWLY_IDLE)
8292 sd->nr_balance_failed++;
8294 if (need_active_balance(&env)) {
8295 unsigned long flags;
8297 raw_spin_lock_irqsave(&busiest->lock, flags);
8299 /* don't kick the active_load_balance_cpu_stop,
8300 * if the curr task on busiest cpu can't be
8303 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
8304 raw_spin_unlock_irqrestore(&busiest->lock,
8306 env.flags |= LBF_ALL_PINNED;
8307 goto out_one_pinned;
8311 * ->active_balance synchronizes accesses to
8312 * ->active_balance_work. Once set, it's cleared
8313 * only after active load balance is finished.
8315 if (!busiest->active_balance) {
8316 busiest->active_balance = 1;
8317 busiest->push_cpu = this_cpu;
8320 raw_spin_unlock_irqrestore(&busiest->lock, flags);
8322 if (active_balance) {
8323 stop_one_cpu_nowait(cpu_of(busiest),
8324 active_load_balance_cpu_stop, busiest,
8325 &busiest->active_balance_work);
8328 /* We've kicked active balancing, force task migration. */
8329 sd->nr_balance_failed = sd->cache_nice_tries+1;
8332 sd->nr_balance_failed = 0;
8334 if (likely(!active_balance)) {
8335 /* We were unbalanced, so reset the balancing interval */
8336 sd->balance_interval = sd->min_interval;
8339 * If we've begun active balancing, start to back off. This
8340 * case may not be covered by the all_pinned logic if there
8341 * is only 1 task on the busy runqueue (because we don't call
8344 if (sd->balance_interval < sd->max_interval)
8345 sd->balance_interval *= 2;
8352 * We reach balance although we may have faced some affinity
8353 * constraints. Clear the imbalance flag if it was set.
8356 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
8358 if (*group_imbalance)
8359 *group_imbalance = 0;
8364 * We reach balance because all tasks are pinned at this level so
8365 * we can't migrate them. Let the imbalance flag set so parent level
8366 * can try to migrate them.
8368 schedstat_inc(sd->lb_balanced[idle]);
8370 sd->nr_balance_failed = 0;
8373 /* tune up the balancing interval */
8374 if (((env.flags & LBF_ALL_PINNED) &&
8375 sd->balance_interval < MAX_PINNED_INTERVAL) ||
8376 (sd->balance_interval < sd->max_interval))
8377 sd->balance_interval *= 2;
8384 static inline unsigned long
8385 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
8387 unsigned long interval = sd->balance_interval;
8390 interval *= sd->busy_factor;
8392 /* scale ms to jiffies */
8393 interval = msecs_to_jiffies(interval);
8394 interval = clamp(interval, 1UL, max_load_balance_interval);
8400 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
8402 unsigned long interval, next;
8404 /* used by idle balance, so cpu_busy = 0 */
8405 interval = get_sd_balance_interval(sd, 0);
8406 next = sd->last_balance + interval;
8408 if (time_after(*next_balance, next))
8409 *next_balance = next;
8413 * idle_balance is called by schedule() if this_cpu is about to become
8414 * idle. Attempts to pull tasks from other CPUs.
8416 static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
8418 unsigned long next_balance = jiffies + HZ;
8419 int this_cpu = this_rq->cpu;
8420 struct sched_domain *sd;
8421 int pulled_task = 0;
8425 * We must set idle_stamp _before_ calling idle_balance(), such that we
8426 * measure the duration of idle_balance() as idle time.
8428 this_rq->idle_stamp = rq_clock(this_rq);
8431 * This is OK, because current is on_cpu, which avoids it being picked
8432 * for load-balance and preemption/IRQs are still disabled avoiding
8433 * further scheduler activity on it and we're being very careful to
8434 * re-start the picking loop.
8436 rq_unpin_lock(this_rq, rf);
8438 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
8439 !this_rq->rd->overload) {
8441 sd = rcu_dereference_check_sched_domain(this_rq->sd);
8443 update_next_balance(sd, &next_balance);
8449 raw_spin_unlock(&this_rq->lock);
8451 update_blocked_averages(this_cpu);
8453 for_each_domain(this_cpu, sd) {
8454 int continue_balancing = 1;
8455 u64 t0, domain_cost;
8457 if (!(sd->flags & SD_LOAD_BALANCE))
8460 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
8461 update_next_balance(sd, &next_balance);
8465 if (sd->flags & SD_BALANCE_NEWIDLE) {
8466 t0 = sched_clock_cpu(this_cpu);
8468 pulled_task = load_balance(this_cpu, this_rq,
8470 &continue_balancing);
8472 domain_cost = sched_clock_cpu(this_cpu) - t0;
8473 if (domain_cost > sd->max_newidle_lb_cost)
8474 sd->max_newidle_lb_cost = domain_cost;
8476 curr_cost += domain_cost;
8479 update_next_balance(sd, &next_balance);
8482 * Stop searching for tasks to pull if there are
8483 * now runnable tasks on this rq.
8485 if (pulled_task || this_rq->nr_running > 0)
8490 raw_spin_lock(&this_rq->lock);
8492 if (curr_cost > this_rq->max_idle_balance_cost)
8493 this_rq->max_idle_balance_cost = curr_cost;
8496 * While browsing the domains, we released the rq lock, a task could
8497 * have been enqueued in the meantime. Since we're not going idle,
8498 * pretend we pulled a task.
8500 if (this_rq->cfs.h_nr_running && !pulled_task)
8504 /* Move the next balance forward */
8505 if (time_after(this_rq->next_balance, next_balance))
8506 this_rq->next_balance = next_balance;
8508 /* Is there a task of a high priority class? */
8509 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
8513 this_rq->idle_stamp = 0;
8515 rq_repin_lock(this_rq, rf);
8521 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
8522 * running tasks off the busiest CPU onto idle CPUs. It requires at
8523 * least 1 task to be running on each physical CPU where possible, and
8524 * avoids physical / logical imbalances.
8526 static int active_load_balance_cpu_stop(void *data)
8528 struct rq *busiest_rq = data;
8529 int busiest_cpu = cpu_of(busiest_rq);
8530 int target_cpu = busiest_rq->push_cpu;
8531 struct rq *target_rq = cpu_rq(target_cpu);
8532 struct sched_domain *sd;
8533 struct task_struct *p = NULL;
8536 rq_lock_irq(busiest_rq, &rf);
8538 /* make sure the requested cpu hasn't gone down in the meantime */
8539 if (unlikely(busiest_cpu != smp_processor_id() ||
8540 !busiest_rq->active_balance))
8543 /* Is there any task to move? */
8544 if (busiest_rq->nr_running <= 1)
8548 * This condition is "impossible", if it occurs
8549 * we need to fix it. Originally reported by
8550 * Bjorn Helgaas on a 128-cpu setup.
8552 BUG_ON(busiest_rq == target_rq);
8554 /* Search for an sd spanning us and the target CPU. */
8556 for_each_domain(target_cpu, sd) {
8557 if ((sd->flags & SD_LOAD_BALANCE) &&
8558 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
8563 struct lb_env env = {
8565 .dst_cpu = target_cpu,
8566 .dst_rq = target_rq,
8567 .src_cpu = busiest_rq->cpu,
8568 .src_rq = busiest_rq,
8572 schedstat_inc(sd->alb_count);
8573 update_rq_clock(busiest_rq);
8575 p = detach_one_task(&env);
8577 schedstat_inc(sd->alb_pushed);
8578 /* Active balancing done, reset the failure counter. */
8579 sd->nr_balance_failed = 0;
8581 schedstat_inc(sd->alb_failed);
8586 busiest_rq->active_balance = 0;
8587 rq_unlock(busiest_rq, &rf);
8590 attach_one_task(target_rq, p);
8597 static inline int on_null_domain(struct rq *rq)
8599 return unlikely(!rcu_dereference_sched(rq->sd));
8602 #ifdef CONFIG_NO_HZ_COMMON
8604 * idle load balancing details
8605 * - When one of the busy CPUs notice that there may be an idle rebalancing
8606 * needed, they will kick the idle load balancer, which then does idle
8607 * load balancing for all the idle CPUs.
8610 cpumask_var_t idle_cpus_mask;
8612 unsigned long next_balance; /* in jiffy units */
8613 } nohz ____cacheline_aligned;
8615 static inline int find_new_ilb(void)
8617 int ilb = cpumask_first(nohz.idle_cpus_mask);
8619 if (ilb < nr_cpu_ids && idle_cpu(ilb))
8626 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
8627 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
8628 * CPU (if there is one).
8630 static void nohz_balancer_kick(void)
8634 nohz.next_balance++;
8636 ilb_cpu = find_new_ilb();
8638 if (ilb_cpu >= nr_cpu_ids)
8641 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
8644 * Use smp_send_reschedule() instead of resched_cpu().
8645 * This way we generate a sched IPI on the target cpu which
8646 * is idle. And the softirq performing nohz idle load balance
8647 * will be run before returning from the IPI.
8649 smp_send_reschedule(ilb_cpu);
8653 void nohz_balance_exit_idle(unsigned int cpu)
8655 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
8657 * Completely isolated CPUs don't ever set, so we must test.
8659 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
8660 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
8661 atomic_dec(&nohz.nr_cpus);
8663 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8667 static inline void set_cpu_sd_state_busy(void)
8669 struct sched_domain *sd;
8670 int cpu = smp_processor_id();
8673 sd = rcu_dereference(per_cpu(sd_llc, cpu));
8675 if (!sd || !sd->nohz_idle)
8679 atomic_inc(&sd->shared->nr_busy_cpus);
8684 void set_cpu_sd_state_idle(void)
8686 struct sched_domain *sd;
8687 int cpu = smp_processor_id();
8690 sd = rcu_dereference(per_cpu(sd_llc, cpu));
8692 if (!sd || sd->nohz_idle)
8696 atomic_dec(&sd->shared->nr_busy_cpus);
8702 * This routine will record that the cpu is going idle with tick stopped.
8703 * This info will be used in performing idle load balancing in the future.
8705 void nohz_balance_enter_idle(int cpu)
8708 * If this cpu is going down, then nothing needs to be done.
8710 if (!cpu_active(cpu))
8713 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
8717 * If we're a completely isolated CPU, we don't play.
8719 if (on_null_domain(cpu_rq(cpu)))
8722 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
8723 atomic_inc(&nohz.nr_cpus);
8724 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8728 static DEFINE_SPINLOCK(balancing);
8731 * Scale the max load_balance interval with the number of CPUs in the system.
8732 * This trades load-balance latency on larger machines for less cross talk.
8734 void update_max_interval(void)
8736 max_load_balance_interval = HZ*num_online_cpus()/10;
8740 * It checks each scheduling domain to see if it is due to be balanced,
8741 * and initiates a balancing operation if so.
8743 * Balancing parameters are set up in init_sched_domains.
8745 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
8747 int continue_balancing = 1;
8749 unsigned long interval;
8750 struct sched_domain *sd;
8751 /* Earliest time when we have to do rebalance again */
8752 unsigned long next_balance = jiffies + 60*HZ;
8753 int update_next_balance = 0;
8754 int need_serialize, need_decay = 0;
8757 update_blocked_averages(cpu);
8760 for_each_domain(cpu, sd) {
8762 * Decay the newidle max times here because this is a regular
8763 * visit to all the domains. Decay ~1% per second.
8765 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
8766 sd->max_newidle_lb_cost =
8767 (sd->max_newidle_lb_cost * 253) / 256;
8768 sd->next_decay_max_lb_cost = jiffies + HZ;
8771 max_cost += sd->max_newidle_lb_cost;
8773 if (!(sd->flags & SD_LOAD_BALANCE))
8777 * Stop the load balance at this level. There is another
8778 * CPU in our sched group which is doing load balancing more
8781 if (!continue_balancing) {
8787 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8789 need_serialize = sd->flags & SD_SERIALIZE;
8790 if (need_serialize) {
8791 if (!spin_trylock(&balancing))
8795 if (time_after_eq(jiffies, sd->last_balance + interval)) {
8796 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
8798 * The LBF_DST_PINNED logic could have changed
8799 * env->dst_cpu, so we can't know our idle
8800 * state even if we migrated tasks. Update it.
8802 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
8804 sd->last_balance = jiffies;
8805 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
8808 spin_unlock(&balancing);
8810 if (time_after(next_balance, sd->last_balance + interval)) {
8811 next_balance = sd->last_balance + interval;
8812 update_next_balance = 1;
8817 * Ensure the rq-wide value also decays but keep it at a
8818 * reasonable floor to avoid funnies with rq->avg_idle.
8820 rq->max_idle_balance_cost =
8821 max((u64)sysctl_sched_migration_cost, max_cost);
8826 * next_balance will be updated only when there is a need.
8827 * When the cpu is attached to null domain for ex, it will not be
8830 if (likely(update_next_balance)) {
8831 rq->next_balance = next_balance;
8833 #ifdef CONFIG_NO_HZ_COMMON
8835 * If this CPU has been elected to perform the nohz idle
8836 * balance. Other idle CPUs have already rebalanced with
8837 * nohz_idle_balance() and nohz.next_balance has been
8838 * updated accordingly. This CPU is now running the idle load
8839 * balance for itself and we need to update the
8840 * nohz.next_balance accordingly.
8842 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
8843 nohz.next_balance = rq->next_balance;
8848 #ifdef CONFIG_NO_HZ_COMMON
8850 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
8851 * rebalancing for all the cpus for whom scheduler ticks are stopped.
8853 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
8855 int this_cpu = this_rq->cpu;
8858 /* Earliest time when we have to do rebalance again */
8859 unsigned long next_balance = jiffies + 60*HZ;
8860 int update_next_balance = 0;
8862 if (idle != CPU_IDLE ||
8863 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
8866 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8867 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
8871 * If this cpu gets work to do, stop the load balancing
8872 * work being done for other cpus. Next load
8873 * balancing owner will pick it up.
8878 rq = cpu_rq(balance_cpu);
8881 * If time for next balance is due,
8884 if (time_after_eq(jiffies, rq->next_balance)) {
8887 rq_lock_irq(rq, &rf);
8888 update_rq_clock(rq);
8889 cpu_load_update_idle(rq);
8890 rq_unlock_irq(rq, &rf);
8892 rebalance_domains(rq, CPU_IDLE);
8895 if (time_after(next_balance, rq->next_balance)) {
8896 next_balance = rq->next_balance;
8897 update_next_balance = 1;
8902 * next_balance will be updated only when there is a need.
8903 * When the CPU is attached to null domain for ex, it will not be
8906 if (likely(update_next_balance))
8907 nohz.next_balance = next_balance;
8909 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
8913 * Current heuristic for kicking the idle load balancer in the presence
8914 * of an idle cpu in the system.
8915 * - This rq has more than one task.
8916 * - This rq has at least one CFS task and the capacity of the CPU is
8917 * significantly reduced because of RT tasks or IRQs.
8918 * - At parent of LLC scheduler domain level, this cpu's scheduler group has
8919 * multiple busy cpu.
8920 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
8921 * domain span are idle.
8923 static inline bool nohz_kick_needed(struct rq *rq)
8925 unsigned long now = jiffies;
8926 struct sched_domain_shared *sds;
8927 struct sched_domain *sd;
8928 int nr_busy, i, cpu = rq->cpu;
8931 if (unlikely(rq->idle_balance))
8935 * We may be recently in ticked or tickless idle mode. At the first
8936 * busy tick after returning from idle, we will update the busy stats.
8938 set_cpu_sd_state_busy();
8939 nohz_balance_exit_idle(cpu);
8942 * None are in tickless mode and hence no need for NOHZ idle load
8945 if (likely(!atomic_read(&nohz.nr_cpus)))
8948 if (time_before(now, nohz.next_balance))
8951 if (rq->nr_running >= 2)
8955 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
8958 * XXX: write a coherent comment on why we do this.
8959 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
8961 nr_busy = atomic_read(&sds->nr_busy_cpus);
8969 sd = rcu_dereference(rq->sd);
8971 if ((rq->cfs.h_nr_running >= 1) &&
8972 check_cpu_capacity(rq, sd)) {
8978 sd = rcu_dereference(per_cpu(sd_asym, cpu));
8980 for_each_cpu(i, sched_domain_span(sd)) {
8982 !cpumask_test_cpu(i, nohz.idle_cpus_mask))
8985 if (sched_asym_prefer(i, cpu)) {
8996 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
9000 * run_rebalance_domains is triggered when needed from the scheduler tick.
9001 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
9003 static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
9005 struct rq *this_rq = this_rq();
9006 enum cpu_idle_type idle = this_rq->idle_balance ?
9007 CPU_IDLE : CPU_NOT_IDLE;
9010 * If this cpu has a pending nohz_balance_kick, then do the
9011 * balancing on behalf of the other idle cpus whose ticks are
9012 * stopped. Do nohz_idle_balance *before* rebalance_domains to
9013 * give the idle cpus a chance to load balance. Else we may
9014 * load balance only within the local sched_domain hierarchy
9015 * and abort nohz_idle_balance altogether if we pull some load.
9017 nohz_idle_balance(this_rq, idle);
9018 rebalance_domains(this_rq, idle);
9022 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
9024 void trigger_load_balance(struct rq *rq)
9026 /* Don't need to rebalance while attached to NULL domain */
9027 if (unlikely(on_null_domain(rq)))
9030 if (time_after_eq(jiffies, rq->next_balance))
9031 raise_softirq(SCHED_SOFTIRQ);
9032 #ifdef CONFIG_NO_HZ_COMMON
9033 if (nohz_kick_needed(rq))
9034 nohz_balancer_kick();
9038 static void rq_online_fair(struct rq *rq)
9042 update_runtime_enabled(rq);
9045 static void rq_offline_fair(struct rq *rq)
9049 /* Ensure any throttled groups are reachable by pick_next_task */
9050 unthrottle_offline_cfs_rqs(rq);
9053 #endif /* CONFIG_SMP */
9056 * scheduler tick hitting a task of our scheduling class:
9058 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
9060 struct cfs_rq *cfs_rq;
9061 struct sched_entity *se = &curr->se;
9063 for_each_sched_entity(se) {
9064 cfs_rq = cfs_rq_of(se);
9065 entity_tick(cfs_rq, se, queued);
9068 if (static_branch_unlikely(&sched_numa_balancing))
9069 task_tick_numa(rq, curr);
9073 * called on fork with the child task as argument from the parent's context
9074 * - child not yet on the tasklist
9075 * - preemption disabled
9077 static void task_fork_fair(struct task_struct *p)
9079 struct cfs_rq *cfs_rq;
9080 struct sched_entity *se = &p->se, *curr;
9081 struct rq *rq = this_rq();
9085 update_rq_clock(rq);
9087 cfs_rq = task_cfs_rq(current);
9088 curr = cfs_rq->curr;
9090 update_curr(cfs_rq);
9091 se->vruntime = curr->vruntime;
9093 place_entity(cfs_rq, se, 1);
9095 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
9097 * Upon rescheduling, sched_class::put_prev_task() will place
9098 * 'current' within the tree based on its new key value.
9100 swap(curr->vruntime, se->vruntime);
9104 se->vruntime -= cfs_rq->min_vruntime;
9109 * Priority of the task has changed. Check to see if we preempt
9113 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
9115 if (!task_on_rq_queued(p))
9119 * Reschedule if we are currently running on this runqueue and
9120 * our priority decreased, or if we are not currently running on
9121 * this runqueue and our priority is higher than the current's
9123 if (rq->curr == p) {
9124 if (p->prio > oldprio)
9127 check_preempt_curr(rq, p, 0);
9130 static inline bool vruntime_normalized(struct task_struct *p)
9132 struct sched_entity *se = &p->se;
9135 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
9136 * the dequeue_entity(.flags=0) will already have normalized the
9143 * When !on_rq, vruntime of the task has usually NOT been normalized.
9144 * But there are some cases where it has already been normalized:
9146 * - A forked child which is waiting for being woken up by
9147 * wake_up_new_task().
9148 * - A task which has been woken up by try_to_wake_up() and
9149 * waiting for actually being woken up by sched_ttwu_pending().
9151 if (!se->sum_exec_runtime || p->state == TASK_WAKING)
9157 #ifdef CONFIG_FAIR_GROUP_SCHED
9159 * Propagate the changes of the sched_entity across the tg tree to make it
9160 * visible to the root
9162 static void propagate_entity_cfs_rq(struct sched_entity *se)
9164 struct cfs_rq *cfs_rq;
9166 /* Start to propagate at parent */
9169 for_each_sched_entity(se) {
9170 cfs_rq = cfs_rq_of(se);
9172 if (cfs_rq_throttled(cfs_rq))
9175 update_load_avg(se, UPDATE_TG);
9179 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
9182 static void detach_entity_cfs_rq(struct sched_entity *se)
9184 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9186 /* Catch up with the cfs_rq and remove our load when we leave */
9187 update_load_avg(se, 0);
9188 detach_entity_load_avg(cfs_rq, se);
9189 update_tg_load_avg(cfs_rq, false);
9190 propagate_entity_cfs_rq(se);
9193 static void attach_entity_cfs_rq(struct sched_entity *se)
9195 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9197 #ifdef CONFIG_FAIR_GROUP_SCHED
9199 * Since the real-depth could have been changed (only FAIR
9200 * class maintain depth value), reset depth properly.
9202 se->depth = se->parent ? se->parent->depth + 1 : 0;
9205 /* Synchronize entity with its cfs_rq */
9206 update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
9207 attach_entity_load_avg(cfs_rq, se);
9208 update_tg_load_avg(cfs_rq, false);
9209 propagate_entity_cfs_rq(se);
9212 static void detach_task_cfs_rq(struct task_struct *p)
9214 struct sched_entity *se = &p->se;
9215 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9217 if (!vruntime_normalized(p)) {
9219 * Fix up our vruntime so that the current sleep doesn't
9220 * cause 'unlimited' sleep bonus.
9222 place_entity(cfs_rq, se, 0);
9223 se->vruntime -= cfs_rq->min_vruntime;
9226 detach_entity_cfs_rq(se);
9229 static void attach_task_cfs_rq(struct task_struct *p)
9231 struct sched_entity *se = &p->se;
9232 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9234 attach_entity_cfs_rq(se);
9236 if (!vruntime_normalized(p))
9237 se->vruntime += cfs_rq->min_vruntime;
9240 static void switched_from_fair(struct rq *rq, struct task_struct *p)
9242 detach_task_cfs_rq(p);
9245 static void switched_to_fair(struct rq *rq, struct task_struct *p)
9247 attach_task_cfs_rq(p);
9249 if (task_on_rq_queued(p)) {
9251 * We were most likely switched from sched_rt, so
9252 * kick off the schedule if running, otherwise just see
9253 * if we can still preempt the current task.
9258 check_preempt_curr(rq, p, 0);
9262 /* Account for a task changing its policy or group.
9264 * This routine is mostly called to set cfs_rq->curr field when a task
9265 * migrates between groups/classes.
9267 static void set_curr_task_fair(struct rq *rq)
9269 struct sched_entity *se = &rq->curr->se;
9271 for_each_sched_entity(se) {
9272 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9274 set_next_entity(cfs_rq, se);
9275 /* ensure bandwidth has been allocated on our new cfs_rq */
9276 account_cfs_rq_runtime(cfs_rq, 0);
9280 void init_cfs_rq(struct cfs_rq *cfs_rq)
9282 cfs_rq->tasks_timeline = RB_ROOT;
9283 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
9284 #ifndef CONFIG_64BIT
9285 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
9288 #ifdef CONFIG_FAIR_GROUP_SCHED
9289 cfs_rq->propagate_avg = 0;
9291 atomic_long_set(&cfs_rq->removed_load_avg, 0);
9292 atomic_long_set(&cfs_rq->removed_util_avg, 0);
9296 #ifdef CONFIG_FAIR_GROUP_SCHED
9297 static void task_set_group_fair(struct task_struct *p)
9299 struct sched_entity *se = &p->se;
9301 set_task_rq(p, task_cpu(p));
9302 se->depth = se->parent ? se->parent->depth + 1 : 0;
9305 static void task_move_group_fair(struct task_struct *p)
9307 detach_task_cfs_rq(p);
9308 set_task_rq(p, task_cpu(p));
9311 /* Tell se's cfs_rq has been changed -- migrated */
9312 p->se.avg.last_update_time = 0;
9314 attach_task_cfs_rq(p);
9317 static void task_change_group_fair(struct task_struct *p, int type)
9320 case TASK_SET_GROUP:
9321 task_set_group_fair(p);
9324 case TASK_MOVE_GROUP:
9325 task_move_group_fair(p);
9330 void free_fair_sched_group(struct task_group *tg)
9334 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
9336 for_each_possible_cpu(i) {
9338 kfree(tg->cfs_rq[i]);
9347 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
9349 struct sched_entity *se;
9350 struct cfs_rq *cfs_rq;
9353 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
9356 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
9360 tg->shares = NICE_0_LOAD;
9362 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
9364 for_each_possible_cpu(i) {
9365 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
9366 GFP_KERNEL, cpu_to_node(i));
9370 se = kzalloc_node(sizeof(struct sched_entity),
9371 GFP_KERNEL, cpu_to_node(i));
9375 init_cfs_rq(cfs_rq);
9376 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
9377 init_entity_runnable_average(se);
9388 void online_fair_sched_group(struct task_group *tg)
9390 struct sched_entity *se;
9394 for_each_possible_cpu(i) {
9398 raw_spin_lock_irq(&rq->lock);
9399 update_rq_clock(rq);
9400 attach_entity_cfs_rq(se);
9401 sync_throttle(tg, i);
9402 raw_spin_unlock_irq(&rq->lock);
9406 void unregister_fair_sched_group(struct task_group *tg)
9408 unsigned long flags;
9412 for_each_possible_cpu(cpu) {
9414 remove_entity_load_avg(tg->se[cpu]);
9417 * Only empty task groups can be destroyed; so we can speculatively
9418 * check on_list without danger of it being re-added.
9420 if (!tg->cfs_rq[cpu]->on_list)
9425 raw_spin_lock_irqsave(&rq->lock, flags);
9426 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
9427 raw_spin_unlock_irqrestore(&rq->lock, flags);
9431 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
9432 struct sched_entity *se, int cpu,
9433 struct sched_entity *parent)
9435 struct rq *rq = cpu_rq(cpu);
9439 init_cfs_rq_runtime(cfs_rq);
9441 tg->cfs_rq[cpu] = cfs_rq;
9444 /* se could be NULL for root_task_group */
9449 se->cfs_rq = &rq->cfs;
9452 se->cfs_rq = parent->my_q;
9453 se->depth = parent->depth + 1;
9457 /* guarantee group entities always have weight */
9458 update_load_set(&se->load, NICE_0_LOAD);
9459 se->parent = parent;
9462 static DEFINE_MUTEX(shares_mutex);
9464 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
9469 * We can't change the weight of the root cgroup.
9474 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
9476 mutex_lock(&shares_mutex);
9477 if (tg->shares == shares)
9480 tg->shares = shares;
9481 for_each_possible_cpu(i) {
9482 struct rq *rq = cpu_rq(i);
9483 struct sched_entity *se = tg->se[i];
9486 /* Propagate contribution to hierarchy */
9487 rq_lock_irqsave(rq, &rf);
9488 update_rq_clock(rq);
9489 for_each_sched_entity(se) {
9490 update_load_avg(se, UPDATE_TG);
9491 update_cfs_shares(se);
9493 rq_unlock_irqrestore(rq, &rf);
9497 mutex_unlock(&shares_mutex);
9500 #else /* CONFIG_FAIR_GROUP_SCHED */
9502 void free_fair_sched_group(struct task_group *tg) { }
9504 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
9509 void online_fair_sched_group(struct task_group *tg) { }
9511 void unregister_fair_sched_group(struct task_group *tg) { }
9513 #endif /* CONFIG_FAIR_GROUP_SCHED */
9516 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
9518 struct sched_entity *se = &task->se;
9519 unsigned int rr_interval = 0;
9522 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
9525 if (rq->cfs.load.weight)
9526 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
9532 * All the scheduling class methods:
9534 const struct sched_class fair_sched_class = {
9535 .next = &idle_sched_class,
9536 .enqueue_task = enqueue_task_fair,
9537 .dequeue_task = dequeue_task_fair,
9538 .yield_task = yield_task_fair,
9539 .yield_to_task = yield_to_task_fair,
9541 .check_preempt_curr = check_preempt_wakeup,
9543 .pick_next_task = pick_next_task_fair,
9544 .put_prev_task = put_prev_task_fair,
9547 .select_task_rq = select_task_rq_fair,
9548 .migrate_task_rq = migrate_task_rq_fair,
9550 .rq_online = rq_online_fair,
9551 .rq_offline = rq_offline_fair,
9553 .task_dead = task_dead_fair,
9554 .set_cpus_allowed = set_cpus_allowed_common,
9557 .set_curr_task = set_curr_task_fair,
9558 .task_tick = task_tick_fair,
9559 .task_fork = task_fork_fair,
9561 .prio_changed = prio_changed_fair,
9562 .switched_from = switched_from_fair,
9563 .switched_to = switched_to_fair,
9565 .get_rr_interval = get_rr_interval_fair,
9567 .update_curr = update_curr_fair,
9569 #ifdef CONFIG_FAIR_GROUP_SCHED
9570 .task_change_group = task_change_group_fair,
9574 #ifdef CONFIG_SCHED_DEBUG
9575 void print_cfs_stats(struct seq_file *m, int cpu)
9577 struct cfs_rq *cfs_rq;
9580 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
9581 print_cfs_rq(m, cpu, cfs_rq);
9585 #ifdef CONFIG_NUMA_BALANCING
9586 void show_numa_stats(struct task_struct *p, struct seq_file *m)
9589 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
9591 for_each_online_node(node) {
9592 if (p->numa_faults) {
9593 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
9594 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
9596 if (p->numa_group) {
9597 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
9598 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
9600 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
9603 #endif /* CONFIG_NUMA_BALANCING */
9604 #endif /* CONFIG_SCHED_DEBUG */
9606 __init void init_sched_fair_class(void)
9609 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
9611 #ifdef CONFIG_NO_HZ_COMMON
9612 nohz.next_balance = jiffies;
9613 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);