git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
sched: Extend scheduler's asym packing
authorTim Chen <tim.c.chen@linux.intel.com>
Tue, 22 Nov 2016 20:23:53 +0000 (12:23 -0800)
committerThomas Gleixner <tglx@linutronix.de>
Thu, 24 Nov 2016 13:09:46 +0000 (14:09 +0100)
We generalize the scheduler's asym packing to provide an ordering
of the CPUs beyond just the CPU number.  This allows the use of the
ASYM_PACKING scheduler machinery to move loads to the preferred CPU in a
sched domain. The preference is defined by the CPU priority
returned by arch_asym_cpu_priority(cpu).

We also record the most preferred CPU in a sched group when
we build the group's capacity, for fast lookup of the preferred CPU
during load balancing.

Co-developed-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-pm@vger.kernel.org
Cc: jolsa@redhat.com
Cc: rjw@rjwysocki.net
Cc: linux-acpi@vger.kernel.org
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: bp@suse.de
Link: http://lkml.kernel.org/r/0e73ae12737dfaafa46c07066cc7c5d3f1675e46.1479844244.git.tim.c.chen@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/sched.h
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

index 19abba04cecad90dc6614c669f8806a50d233760..fe9a499d5aa4e6f64bc92b8c39d4e27a74e8d671 100644 (file)
@@ -1077,6 +1077,8 @@ static inline int cpu_numa_flags(void)
 }
 #endif
 
+extern int arch_asym_cpu_priority(int cpu);
+
 struct sched_domain_attr {
        int relax_domain_level;
 };
index dc64bd71ed2bc6a1d8d8c848015774c9d48ee3cc..393759bd526e84ace42a83eafe9bcda61d7aceec 100644 (file)
@@ -6303,7 +6303,22 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
        WARN_ON(!sg);
 
        do {
+               int cpu, max_cpu = -1;
+
                sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+
+               if (!(sd->flags & SD_ASYM_PACKING))
+                       goto next;
+
+               for_each_cpu(cpu, sched_group_cpus(sg)) {
+                       if (max_cpu < 0)
+                               max_cpu = cpu;
+                       else if (sched_asym_prefer(cpu, max_cpu))
+                               max_cpu = cpu;
+               }
+               sg->asym_prefer_cpu = max_cpu;
+
+next:
                sg = sg->next;
        } while (sg != sd->groups);
 
index aa475896782d4f52d4b689a7b6accfb0db5c0e57..18d9e75f1f6ef79654bfd9133be77a9ff92667f2 100644 (file)
@@ -97,6 +97,16 @@ unsigned int normalized_sysctl_sched_wakeup_granularity      = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost   = 500000UL;
 
+#ifdef CONFIG_SMP
+/*
+ * For asym packing, by default the lower numbered cpu has higher priority.
+ */
+int __weak arch_asym_cpu_priority(int cpu)
+{
+       return -cpu;
+}
+#endif
+
 #ifdef CONFIG_CFS_BANDWIDTH
 /*
  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
@@ -7388,16 +7398,18 @@ asym_packing:
        if (env->idle == CPU_NOT_IDLE)
                return true;
        /*
-        * ASYM_PACKING needs to move all the work to the lowest
-        * numbered CPUs in the group, therefore mark all groups
-        * higher than ourself as busy.
+        * ASYM_PACKING needs to move all the work to the highest
+        * priority CPUs in the group, therefore mark all groups
+        * of lower priority than ourself as busy.
         */
-       if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
+       if (sgs->sum_nr_running &&
+           sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
                if (!sds->busiest)
                        return true;
 
-               /* Prefer to move from highest possible cpu's work */
-               if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
+               /* Prefer to move from lowest priority cpu's work */
+               if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
+                                     sg->asym_prefer_cpu))
                        return true;
        }
 
@@ -7549,8 +7561,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
        if (!sds->busiest)
                return 0;
 
-       busiest_cpu = group_first_cpu(sds->busiest);
-       if (env->dst_cpu > busiest_cpu)
+       busiest_cpu = sds->busiest->asym_prefer_cpu;
+       if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
                return 0;
 
        env->imbalance = DIV_ROUND_CLOSEST(
@@ -7888,10 +7900,11 @@ static int need_active_balance(struct lb_env *env)
 
                /*
                 * ASYM_PACKING needs to force migrate tasks from busy but
-                * higher numbered CPUs in order to pack all tasks in the
-                * lowest numbered CPUs.
+                * lower priority CPUs in order to pack all tasks in the
+                * highest priority CPUs.
                 */
-               if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
+               if ((sd->flags & SD_ASYM_PACKING) &&
+                   sched_asym_prefer(env->dst_cpu, env->src_cpu))
                        return 1;
        }
 
@@ -8740,7 +8753,7 @@ static inline bool nohz_kick_needed(struct rq *rq)
        unsigned long now = jiffies;
        struct sched_domain_shared *sds;
        struct sched_domain *sd;
-       int nr_busy, cpu = rq->cpu;
+       int nr_busy, i, cpu = rq->cpu;
        bool kick = false;
 
        if (unlikely(rq->idle_balance))
@@ -8791,12 +8804,18 @@ static inline bool nohz_kick_needed(struct rq *rq)
        }
 
        sd = rcu_dereference(per_cpu(sd_asym, cpu));
-       if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
-                                 sched_domain_span(sd)) < cpu)) {
-               kick = true;
-               goto unlock;
-       }
+       if (sd) {
+               for_each_cpu(i, sched_domain_span(sd)) {
+                       if (i == cpu ||
+                           !cpumask_test_cpu(i, nohz.idle_cpus_mask))
+                               continue;
 
+                       if (sched_asym_prefer(i, cpu)) {
+                               kick = true;
+                               goto unlock;
+                       }
+               }
+       }
 unlock:
        rcu_read_unlock();
        return kick;
index d7e39317d6884938b2f5a31a7e641d936ab53bf1..7b34c7826ca5952be8701b58205c7b481cad11d2 100644 (file)
@@ -540,6 +540,11 @@ struct dl_rq {
 
 #ifdef CONFIG_SMP
 
+static inline bool sched_asym_prefer(int a, int b)
+{
+       return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
+}
+
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by
@@ -908,6 +913,7 @@ struct sched_group {
 
        unsigned int group_weight;
        struct sched_group_capacity *sgc;
+       int asym_prefer_cpu;            /* cpu of highest priority in group */
 
        /*
         * The CPUs this group covers.