diff --git a/kernel/sched.c b/kernel/sched.c
index cbb3a0eee58eb2c5c6748b949fca5579bbb57432..fde6ff90352583d65ff890a407200f2fb0c3073e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);
  * (The default weight is 1024 - so there's no practical
  *  limitation from this.)
  */
-#define MIN_SHARES     2
-#define MAX_SHARES     (1UL << (18 + SCHED_LOAD_RESOLUTION))
+#define MIN_SHARES     (1UL <<  1)
+#define MAX_SHARES     (1UL << 18)
 
 static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
@@ -605,10 +605,10 @@ static inline int cpu_of(struct rq *rq)
 /*
  * Return the group to which this task belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
- * holds that lock for each task it moves into the cgroup. Therefore
- * by holding that lock, we pin the task to the current cgroup.
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+ * task it moves into the cgroup. Therefore by holding either of those locks,
+ * we pin the task to the current cgroup.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -616,7 +616,8 @@ static inline struct task_group *task_group(struct task_struct *p)
        struct cgroup_subsys_state *css;
 
        css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-                       lockdep_is_held(&p->pi_lock));
+                       lockdep_is_held(&p->pi_lock) ||
+                       lockdep_is_held(&task_rq(p)->lock));
        tg = container_of(css, struct task_group, css);
 
        return autogroup_task_group(p, tg);
@@ -2200,6 +2201,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                        !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
 
 #ifdef CONFIG_LOCKDEP
+       /*
+        * The caller should hold either p->pi_lock or rq->lock, when changing
+        * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+        *
+        * sched_move_task() holds both and thus holding either pins the cgroup,
+        * see set_task_rq().
+        *
+        * Furthermore, all task_rq users should acquire both locks, see
+        * task_rq_lock().
+        */
        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
                                      lockdep_is_held(&task_rq(p)->lock)));
 #endif
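
The rule documented above, take both p->pi_lock and rq->lock to move a task, hold either one to pin what you read, is a generic two-lock pattern. A minimal userspace sketch of that pattern (pthreads, hypothetical names, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static int task_cpu;   /* stands in for the task's CPU / group binding */

/* The "mover" (think sched_move_task()) takes both locks... */
static void move_task(int cpu)
{
        pthread_mutex_lock(&pi_lock);
        pthread_mutex_lock(&rq_lock);
        task_cpu = cpu;
        pthread_mutex_unlock(&rq_lock);
        pthread_mutex_unlock(&pi_lock);
}

/* ...so anyone holding either one of them sees a stable value. */
static int read_pinned(pthread_mutex_t *either)
{
        pthread_mutex_lock(either);
        int cpu = task_cpu;
        pthread_mutex_unlock(either);
        return cpu;
}

int main(void)
{
        move_task(3);
        printf("%d %d\n", read_pinned(&pi_lock), read_pinned(&rq_lock));
        return 0;
}
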
@@ -2447,6 +2458,10 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
                }
                rcu_read_unlock();
        }
+
+       if (wake_flags & WF_MIGRATED)
+               schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+
 #endif /* CONFIG_SMP */
 
        schedstat_inc(rq, ttwu_count);
@@ -2455,9 +2470,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
        if (wake_flags & WF_SYNC)
                schedstat_inc(p, se.statistics.nr_wakeups_sync);
 
-       if (cpu != task_cpu(p))
-               schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-
 #endif /* CONFIG_SCHEDSTATS */
 }
 
@@ -2532,13 +2544,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
 {
        struct rq *rq = this_rq();
-       struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-       if (!list)
-               return;
 
        raw_spin_lock(&rq->lock);
 
@@ -2551,9 +2559,45 @@ static void sched_ttwu_pending(void)
        raw_spin_unlock(&rq->lock);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+       struct rq *rq = this_rq();
+       struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+       if (!list)
+               return;
+
+       sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 void scheduler_ipi(void)
 {
-       sched_ttwu_pending();
+       struct rq *rq = this_rq();
+       struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+       if (!list)
+               return;
+
+       /*
+        * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+        * traditionally all their work was done from the interrupt return
+        * path. Now that we actually do some work, we need to make sure
+        * we do call them.
+        *
+        * Some archs already do call them, luckily irq_enter/exit nest
+        * properly.
+        *
+        * Arguably we should visit all archs and update all handlers,
+        * however a fair share of IPIs are still resched only so this would
+        * somewhat pessimize the simple resched case.
+        */
+       irq_enter();
+       sched_ttwu_do_pending(list);
+       irq_exit();
 }
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
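
The split above keeps scheduler_ipi() cheap for plain reschedule IPIs: the pending list is still detached with a single xchg(), and only a non-empty list pays for the irq_enter()/irq_exit() pair. A self-contained userspace model of that detach-everything handoff (C11 atomics, illustrative names, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>

struct task {
        const char *name;
        struct task *wake_entry;
};

static _Atomic(struct task *) wake_list;

/* ttwu_queue_remote() side: lock-free push onto the pending list */
static void queue_remote(struct task *p)
{
        struct task *head;

        do {
                head = atomic_load(&wake_list);
                p->wake_entry = head;
        } while (!atomic_compare_exchange_weak(&wake_list, &head, p));
}

/* scheduler_ipi()/sched_ttwu_pending() side: detach everything at once */
static struct task *grab_pending(void)
{
        return atomic_exchange(&wake_list, NULL);   /* xchg(&rq->wake_list, NULL) */
}

int main(void)
{
        struct task a = { "a", NULL }, b = { "b", NULL };

        queue_remote(&a);
        queue_remote(&b);

        for (struct task *p = grab_pending(); p; p = p->wake_entry)
                printf("activate %s\n", p->name);
        return 0;
}
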
@@ -2600,6 +2644,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 
 #if defined(CONFIG_SMP)
        if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+               sched_clock_cpu(cpu); /* sync clocks x-cpu */
                ttwu_queue_remote(p, cpu);
                return;
        }
@@ -2674,8 +2719,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                p->sched_class->task_waking(p);
 
        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-       if (task_cpu(p) != cpu)
+       if (task_cpu(p) != cpu) {
+               wake_flags |= WF_MIGRATED;
                set_task_cpu(p, cpu);
+       }
 #endif /* CONFIG_SMP */
 
        ttwu_queue(p, cpu);
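
The old statistic was computed in ttwu_stat() as cpu != task_cpu(p); by the time that runs, set_task_cpu() has already updated task_cpu(p) (and with queued remote wakeups the accounting happens on the destination CPU anyway), so migrated wakeups went uncounted. Latching the decision into wake_flags records it where it is made. A tiny illustration of the difference (assumed flag value, not kernel code):

#include <stdio.h>

#define WF_MIGRATED 0x4         /* assumed value; the real define is outside this diff */

static int task_cpu = 0;

int main(void)
{
        int chosen = 2, wake_flags = 0;

        if (task_cpu != chosen) {
                wake_flags |= WF_MIGRATED;      /* latch the decision here ...          */
                task_cpu = chosen;              /* ... because this erases the evidence */
        }

        /* the old test, done after the update, is always false: */
        printf("old check: %d, flag: %d\n",
               chosen != task_cpu, !!(wake_flags & WF_MIGRATED));
        return 0;
}
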
@@ -6542,7 +6589,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               if (!group->cpu_power) {
+               if (!group->sgp->power) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: domain->cpu_power not "
                                        "set\n");
@@ -6566,9 +6613,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
                printk(KERN_CONT " %s", str);
-               if (group->cpu_power != SCHED_POWER_SCALE) {
+               if (group->sgp->power != SCHED_POWER_SCALE) {
                        printk(KERN_CONT " (cpu_power = %d)",
-                               group->cpu_power);
+                               group->sgp->power);
                }
 
                group = group->next;
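
The group->sgp->power references come from splitting the power bookkeeping out of struct sched_group into a separate, reference-counted object that several groups can share. A rough, compilable sketch of the resulting shape, abridged to the fields this diff touches (not the full kernel definitions):

#include <stdatomic.h>

struct sched_group_power {
        atomic_int ref;                 /* shared between groups; see free_sched_groups() */
        unsigned int power;             /* previously sched_group::cpu_power              */
};

struct sched_group {
        struct sched_group *next;       /* circular list of a domain's groups             */
        struct sched_group_power *sgp;  /* shared, reference-counted power data           */
        unsigned int group_weight;
        /* the group's cpumask (its span) is laid out right after the struct */
};

int main(void)
{
        static struct sched_group_power power = { .power = 1024 };
        static struct sched_group group = { .sgp = &power };

        return group.sgp->power == 1024 ? 0 : 1;
}
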
@@ -6759,11 +6806,39 @@ static struct root_domain *alloc_rootdomain(void)
        return rd;
 }
 
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+       struct sched_group *tmp, *first;
+
+       if (!sg)
+               return;
+
+       first = sg;
+       do {
+               tmp = sg->next;
+
+               if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+                       kfree(sg->sgp);
+
+               kfree(sg);
+               sg = tmp;
+       } while (sg != first);
+}
+
 static void free_sched_domain(struct rcu_head *rcu)
 {
        struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-       if (atomic_dec_and_test(&sd->groups->ref))
+
+       /*
+        * If it's an overlapping domain it has private groups, iterate and
+        * nuke them all.
+        */
+       if (sd->flags & SD_OVERLAP) {
+               free_sched_groups(sd->groups, 1);
+       } else if (atomic_dec_and_test(&sd->groups->ref)) {
+               kfree(sd->groups->sgp);
                kfree(sd->groups);
+       }
        kfree(sd);
 }
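
free_sched_groups(), used just above, has to handle both layouts: overlapping domains own a private ring of groups and drop a reference on each shared sched_group_power, while the classic layout still frees one shared group per domain. A userspace model of releasing a circular, singly linked list whose payload is shared and reference counted (illustrative names, not kernel code):

#include <stdatomic.h>
#include <stdlib.h>

struct power {
        atomic_int ref;
        unsigned int value;
};

struct group {
        struct group *next;     /* circular: last->next == first      */
        struct power *pwr;      /* possibly shared with other groups  */
};

static void free_groups(struct group *g, int free_power)
{
        struct group *first, *tmp;

        if (!g)
                return;

        first = g;
        do {
                tmp = g->next;

                /* dropping the last reference frees the shared payload too */
                if (free_power && atomic_fetch_sub(&g->pwr->ref, 1) == 1)
                        free(g->pwr);

                free(g);
                g = tmp;
        } while (g != first);
}

int main(void)
{
        struct power *p = malloc(sizeof(*p));
        struct group *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

        atomic_init(&p->ref, 2);        /* both groups point at the same payload */
        p->value = 1024;
        a->pwr = b->pwr = p;
        a->next = b;
        b->next = a;                    /* close the ring */

        free_groups(a, 1);              /* frees a and b; p goes with the last reference */
        return 0;
}
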
 
@@ -6930,6 +7005,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
        struct sched_domain **__percpu sd;
        struct sched_group **__percpu sg;
+       struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -6949,15 +7025,73 @@ struct sched_domain_topology_level;
 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
+#define SDTL_OVERLAP   0x01
+
 struct sched_domain_topology_level {
        sched_domain_init_f init;
        sched_domain_mask_f mask;
+       int                 flags;
        struct sd_data      data;
 };
 
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+       struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+       const struct cpumask *span = sched_domain_span(sd);
+       struct cpumask *covered = sched_domains_tmpmask;
+       struct sd_data *sdd = sd->private;
+       struct sched_domain *child;
+       int i;
+
+       cpumask_clear(covered);
+
+       for_each_cpu(i, span) {
+               struct cpumask *sg_span;
+
+               if (cpumask_test_cpu(i, covered))
+                       continue;
+
+               sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                               GFP_KERNEL, cpu_to_node(i));
+
+               if (!sg)
+                       goto fail;
+
+               sg_span = sched_group_cpus(sg);
+
+               child = *per_cpu_ptr(sdd->sd, i);
+               if (child->child) {
+                       child = child->child;
+                       cpumask_copy(sg_span, sched_domain_span(child));
+               } else
+                       cpumask_set_cpu(i, sg_span);
+
+               cpumask_or(covered, covered, sg_span);
+
+               sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+               atomic_inc(&sg->sgp->ref);
+
+               if (cpumask_test_cpu(cpu, sg_span))
+                       groups = sg;
+
+               if (!first)
+                       first = sg;
+               if (last)
+                       last->next = sg;
+               last = sg;
+               last->next = first;
+       }
+       sd->groups = groups;
+
+       return 0;
+
+fail:
+       free_sched_groups(first, 0);
+
+       return -ENOMEM;
+}
+
 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 {
        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
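
build_overlap_sched_groups() above is essentially a greedy covering pass: for every CPU in the domain's span that is not yet covered, it turns that CPU's child-domain span into one group, links the groups into a ring, and remembers the group containing the CPU the list is built for. A compact userspace model of that loop, with cpumasks shrunk to bits in an unsigned long (illustrative, error handling and freeing omitted):

#include <stdio.h>
#include <stdlib.h>

#define NCPU 8

struct group {
        unsigned long span;
        struct group *next;
};

/* child_span(i): span of CPU i's child domain (here: pairs of CPUs) */
static unsigned long child_span(int i)
{
        return 3UL << (i & ~1);
}

static struct group *build_overlap_groups(unsigned long span, int cpu)
{
        struct group *first = NULL, *last = NULL, *groups = NULL, *sg;
        unsigned long covered = 0;

        for (int i = 0; i < NCPU; i++) {
                if (!(span & (1UL << i)) || (covered & (1UL << i)))
                        continue;

                sg = calloc(1, sizeof(*sg));
                sg->span = child_span(i) & span;
                covered |= sg->span;

                if (sg->span & (1UL << cpu))
                        groups = sg;            /* this CPU's own group heads the list */

                if (!first)
                        first = sg;
                if (last)
                        last->next = sg;
                last = sg;
                last->next = first;             /* keep the ring closed at every step */
        }
        return groups;
}

int main(void)
{
        struct group *g = build_overlap_groups(0xffUL, 5), *start = g;

        do {
                printf("group span %#lx\n", g->span);
                g = g->next;
        } while (g != start);
        return 0;
}
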
@@ -6966,24 +7100,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
        if (child)
                cpu = cpumask_first(sched_domain_span(child));
 
-       if (sg)
+       if (sg) {
                *sg = *per_cpu_ptr(sdd->sg, cpu);
+               (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+               atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+       }
 
        return cpu;
 }
 
 /*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
  */
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
 {
        struct sched_group *first = NULL, *last = NULL;
        struct sd_data *sdd = sd->private;
@@ -6991,6 +7125,12 @@ build_sched_groups(struct sched_domain *sd)
        struct cpumask *covered;
        int i;
 
+       get_group(cpu, sdd, &sd->groups);
+       atomic_inc(&sd->groups->ref);
+
+       if (cpu != cpumask_first(sched_domain_span(sd)))
+               return 0;
+
        lockdep_assert_held(&sched_domains_mutex);
        covered = sched_domains_tmpmask;
 
@@ -7005,7 +7145,7 @@ build_sched_groups(struct sched_domain *sd)
                        continue;
 
                cpumask_clear(sched_group_cpus(sg));
-               sg->cpu_power = 0;
+               sg->sgp->power = 0;
 
                for_each_cpu(j, span) {
                        if (get_group(j, sdd, NULL) != group)
@@ -7022,6 +7162,8 @@ build_sched_groups(struct sched_domain *sd)
                last = sg;
        }
        last->next = first;
+
+       return 0;
 }
 
 /*
@@ -7036,12 +7178,17 @@ build_sched_groups(struct sched_domain *sd)
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-       WARN_ON(!sd || !sd->groups);
+       struct sched_group *sg = sd->groups;
 
-       if (cpu != group_first_cpu(sd->groups))
-               return;
+       WARN_ON(!sd || !sg);
+
+       do {
+               sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+               sg = sg->next;
+       } while (sg != sd->groups);
 
-       sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+       if (cpu != group_first_cpu(sg))
+               return;
 
        update_group_power(sd, cpu);
 }
@@ -7162,15 +7309,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 static void claim_allocations(int cpu, struct sched_domain *sd)
 {
        struct sd_data *sdd = sd->private;
-       struct sched_group *sg = sd->groups;
 
        WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
        *per_cpu_ptr(sdd->sd, cpu) = NULL;
 
-       if (cpu == cpumask_first(sched_group_cpus(sg))) {
-               WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+       if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
                *per_cpu_ptr(sdd->sg, cpu) = NULL;
-       }
+
+       if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+               *per_cpu_ptr(sdd->sgp, cpu) = NULL;
 }
 
 #ifdef CONFIG_SCHED_SMT
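
claim_allocations() now decides ownership purely by reference count: any staging slot whose object was claimed during domain construction is cleared, so the later __sdt_free() pass releases only what nobody took. A small sketch of that claim-by-clearing pattern (illustrative names, not kernel code):

#include <stdlib.h>

#define NSLOT 4

static void *slot[NSLOT];

static void *claim(int i)
{
        void *obj = slot[i];

        slot[i] = NULL;         /* ownership moves to the caller */
        return obj;
}

static void cleanup(void)
{
        for (int i = 0; i < NSLOT; i++)
                free(slot[i]);  /* free(NULL) is a no-op: claimed slots are skipped */
}

int main(void)
{
        for (int i = 0; i < NSLOT; i++)
                slot[i] = malloc(32);

        void *mine = claim(2);  /* e.g. a sched_group that a domain now points at */

        cleanup();              /* frees slots 0, 1 and 3 only */
        free(mine);             /* the claimer releases what it took */
        return 0;
}
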
@@ -7195,7 +7342,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #endif
        { sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_NUMA
-       { sd_init_NODE, cpu_node_mask, },
+       { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
        { sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
        { NULL, },
@@ -7219,9 +7366,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                if (!sdd->sg)
                        return -ENOMEM;
 
+               sdd->sgp = alloc_percpu(struct sched_group_power *);
+               if (!sdd->sgp)
+                       return -ENOMEM;
+
                for_each_cpu(j, cpu_map) {
                        struct sched_domain *sd;
                        struct sched_group *sg;
+                       struct sched_group_power *sgp;
 
                        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
@@ -7236,6 +7388,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                                return -ENOMEM;
 
                        *per_cpu_ptr(sdd->sg, j) = sg;
+
+                       sgp = kzalloc_node(sizeof(struct sched_group_power),
+                                       GFP_KERNEL, cpu_to_node(j));
+                       if (!sgp)
+                               return -ENOMEM;
+
+                       *per_cpu_ptr(sdd->sgp, j) = sgp;
                }
        }
 
@@ -7251,11 +7410,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
                struct sd_data *sdd = &tl->data;
 
                for_each_cpu(j, cpu_map) {
-                       kfree(*per_cpu_ptr(sdd->sd, j));
+                       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+                       if (sd && (sd->flags & SD_OVERLAP))
+                               free_sched_groups(sd->groups, 0);
                        kfree(*per_cpu_ptr(sdd->sg, j));
+                       kfree(*per_cpu_ptr(sdd->sgp, j));
                }
                free_percpu(sdd->sd);
                free_percpu(sdd->sg);
+               free_percpu(sdd->sgp);
        }
 }
 
@@ -7301,8 +7464,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
                struct sched_domain_topology_level *tl;
 
                sd = NULL;
-               for (tl = sched_domain_topology; tl->init; tl++)
+               for (tl = sched_domain_topology; tl->init; tl++) {
                        sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+                       if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+                               sd->flags |= SD_OVERLAP;
+                       if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+                               break;
+               }
 
                while (sd->child)
                        sd = sd->child;
@@ -7314,13 +7482,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
        for_each_cpu(i, cpu_map) {
                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
                        sd->span_weight = cpumask_weight(sched_domain_span(sd));
-                       get_group(i, sd->private, &sd->groups);
-                       atomic_inc(&sd->groups->ref);
-
-                       if (i != cpumask_first(sched_domain_span(sd)))
-                               continue;
-
-                       build_sched_groups(sd);
+                       if (sd->flags & SD_OVERLAP) {
+                               if (build_overlap_sched_groups(sd, i))
+                                       goto error;
+                       } else {
+                               if (build_sched_groups(sd, i))
+                                       goto error;
+                       }
                }
        }
 
@@ -7742,6 +7910,9 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #endif
 #endif
        cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+       cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
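
min_vruntime is a u64, and 32-bit readers cannot load it atomically; the extra min_vruntime_copy lets a lockless reader detect a torn value and retry, with the writer publishing the copy behind a write barrier. A userspace model of that protocol using C11 atomics, a sketch of the idea rather than the kernel code (which keeps two plain u64 fields and uses smp_wmb()/smp_rmb()):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t min_vruntime;       /* primary value  */
static _Atomic uint64_t min_vruntime_copy;  /* published copy */

static void writer_update(uint64_t v)
{
        atomic_store_explicit(&min_vruntime, v, memory_order_relaxed);
        /* plays the role of smp_wmb(): order the two stores */
        atomic_store_explicit(&min_vruntime_copy, v, memory_order_release);
}

static uint64_t reader_read(void)
{
        uint64_t copy, val;

        do {
                copy = atomic_load_explicit(&min_vruntime_copy, memory_order_acquire);
                /* plays the role of smp_rmb(): order the two loads */
                val = atomic_load_explicit(&min_vruntime, memory_order_relaxed);
        } while (val != copy);  /* mismatch means the update was caught mid-flight */

        return val;
}

int main(void)
{
        writer_update((1ULL << 32) + 42);
        printf("%llu\n", (unsigned long long)reader_read());
        return 0;
}
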
@@ -8435,10 +8606,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
        if (!tg->se[0])
                return -EINVAL;
 
-       if (shares < MIN_SHARES)
-               shares = MIN_SHARES;
-       else if (shares > MAX_SHARES)
-               shares = MAX_SHARES;
+       shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 
        mutex_lock(&shares_mutex);
        if (tg->shares == shares)
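
With MIN_SHARES and MAX_SHARES now in user-visible units, the clamp scales both bounds with scale_load() before comparing. A small model of the arithmetic, assuming scale_load(w) is w << SCHED_LOAD_RESOLUTION and a resolution of 10 for the high-resolution build (with a resolution of 0 the bounds are simply 2 and 1 << 18):

#include <stdio.h>

#define SCHED_LOAD_RESOLUTION  10                /* assumption for this sketch   */
#define scale_load(w)          ((w) << SCHED_LOAD_RESOLUTION)
#define MIN_SHARES             (1UL << 1)
#define MAX_SHARES             (1UL << 18)

static unsigned long clamp_shares(unsigned long shares)
{
        unsigned long lo = scale_load(MIN_SHARES);   /* 2048         */
        unsigned long hi = scale_load(MAX_SHARES);   /* 268435456    */

        if (shares < lo)
                return lo;
        if (shares > hi)
                return hi;
        return shares;
}

int main(void)
{
        /* model of a cgroup write: the value is scaled before being clamped */
        printf("%lu %lu %lu\n",
               clamp_shares(scale_load(1UL)),        /* -> 2048      */
               clamp_shares(scale_load(1024UL)),     /* unchanged    */
               clamp_shares(~0UL));                  /* -> 268435456 */
        return 0;
}
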