git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge branch 'linus' into sched/core
author Ingo Molnar <mingo@elte.hu>
Thu, 14 Oct 2010 07:11:43 +0000 (09:11 +0200)
committer Ingo Molnar <mingo@elte.hu>
Thu, 14 Oct 2010 07:11:46 +0000 (09:11 +0200)
Merge reason: update from -rc5 to -almost-final

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c
kernel/sched_fair.c

diff --combined kernel/sched.c
index 4ad473814350e21df6dd516c45808283e74c3768,dc85ceb908322cad7196339f4df8dd58c37b1cec..2111491f642473e5b2a3662462f03266678473ac
@@@ -426,7 -426,9 +426,7 @@@ struct root_domain 
         */
        cpumask_var_t rto_mask;
        atomic_t rto_count;
 -#ifdef CONFIG_SMP
        struct cpupri cpupri;
 -#endif
  };
  
  /*
   */
  static struct root_domain def_root_domain;
  
 -#endif
 +#endif /* CONFIG_SMP */
  
  /*
   * This is the main, per-CPU runqueue data structure.
@@@ -721,7 -723,7 +721,7 @@@ sched_feat_write(struct file *filp, con
                size_t cnt, loff_t *ppos)
  {
        char buf[64];
 -      char *cmp = buf;
 +      char *cmp;
        int neg = 0;
        int i;
  
                return -EFAULT;
  
        buf[cnt] = 0;
 +      cmp = strstrip(buf);
  
        if (strncmp(buf, "NO_", 3) == 0) {
                neg = 1;
        }
  
        for (i = 0; sched_feat_names[i]; i++) {
 -              int len = strlen(sched_feat_names[i]);
 -
 -              if (strncmp(cmp, sched_feat_names[i], len) == 0) {
 +              if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg)
                                sysctl_sched_features &= ~(1UL << i);
                        else
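
Aside: the hunk above makes sched_feat_write() strip its input with strstrip() and compare it against each feature name with an exact strcmp(), rather than a length-limited strncmp(); the strip removes the trailing newline left by a write such as echo, and the exact compare cannot be satisfied by a feature name that is merely a prefix of a longer one. A minimal userspace sketch of that matching scheme, where trim(), the buffer contents and the feature list are illustrative stand-ins rather than the kernel code:

/*
 * Hypothetical userspace sketch: exact matching on trimmed input.
 * trim() stands in for the kernel's strstrip(); the names are illustrative.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static const char *feat_names[] = { "GENTLE_FAIR_SLEEPERS", "START_DEBIT", NULL };

static char *trim(char *s)
{
	size_t len = strlen(s);

	while (len && isspace((unsigned char)s[len - 1]))
		s[--len] = '\0';
	return s;
}

int main(void)
{
	char buf[64] = "START_DEBIT\n";		/* a write from echo(1) ends in '\n' */
	const char *cmp = trim(buf);
	int i;

	for (i = 0; feat_names[i]; i++) {
		/* Exact compare: neither the newline nor a longer name that
		 * merely shares a prefix can match by accident. */
		if (strcmp(cmp, feat_names[i]) == 0)
			printf("matched feature bit %d: %s\n", i, feat_names[i]);
	}
	return 0;
}
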
@@@ -2849,14 -2852,14 +2849,14 @@@ context_switch(struct rq *rq, struct ta
         */
        arch_start_context_switch(prev);
  
 -      if (likely(!mm)) {
 +      if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);
  
 -      if (likely(!prev->mm)) {
 +      if (!prev->mm) {
                prev->active_mm = NULL;
                rq->prev_mm = oldmm;
        }
@@@ -3510,9 -3513,9 +3510,9 @@@ void task_times(struct task_struct *p, 
        rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
  
        if (total) {
-               u64 temp;
+               u64 temp = rtime;
  
-               temp = (u64)(rtime * utime);
+               temp *= utime;
                do_div(temp, total);
                utime = (cputime_t)temp;
        } else
@@@ -3543,9 -3546,9 +3543,9 @@@ void thread_group_times(struct task_str
        rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
  
        if (total) {
-               u64 temp;
+               u64 temp = rtime;
  
-               temp = (u64)(rtime * cputime.utime);
+               temp *= cputime.utime;
                do_div(temp, total);
                utime = (cputime_t)temp;
        } else
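
Aside: both hunks above initialize the u64 temporary from rtime and then multiply, instead of casting the finished product rtime * utime to u64; on configurations where cputime_t is a 32-bit type the old form can wrap before the cast widens it, while the new form performs the multiplication itself in 64 bits ahead of do_div(). A minimal userspace sketch of the difference, using plain stdint types rather than cputime_t and do_div():

/* Hypothetical sketch: widening after the multiply vs. widening before it. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rtime = 3000000000u;	/* a large runtime in cputime units */
	uint32_t utime = 5u;
	uint32_t total = 7u;

	/* Old form: the 32-bit product wraps before the cast can widen it. */
	uint64_t wrapped = (uint64_t)(rtime * utime);

	/* New form: widen first, so the multiply itself is done in 64 bits. */
	uint64_t exact = rtime;
	exact *= utime;

	printf("scaled utime, old form: %llu\n", (unsigned long long)(wrapped / total));
	printf("scaled utime, new form: %llu\n", (unsigned long long)(exact / total));
	return 0;
}

The two printed values diverge as soon as rtime * utime exceeds 2^32, which is exactly when the scaled utime in the old form went wrong.
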
@@@ -4355,7 -4358,6 +4355,7 @@@ void rt_mutex_setprio(struct task_struc
  
        rq = task_rq_lock(p, &flags);
  
 +      trace_sched_pi_setprio(p, prio);
        oldprio = p->prio;
        prev_class = p->sched_class;
        on_rq = p->se.on_rq;
@@@ -6512,7 -6514,6 +6512,7 @@@ struct s_data 
        cpumask_var_t           nodemask;
        cpumask_var_t           this_sibling_map;
        cpumask_var_t           this_core_map;
 +      cpumask_var_t           this_book_map;
        cpumask_var_t           send_covered;
        cpumask_var_t           tmpmask;
        struct sched_group      **sched_group_nodes;
@@@ -6524,7 -6525,6 +6524,7 @@@ enum s_alloc 
        sa_rootdomain,
        sa_tmpmask,
        sa_send_covered,
 +      sa_this_book_map,
        sa_this_core_map,
        sa_this_sibling_map,
        sa_nodemask,
@@@ -6560,48 -6560,31 +6560,48 @@@ cpu_to_cpu_group(int cpu, const struct 
  #ifdef CONFIG_SCHED_MC
  static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
  static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
 -#endif /* CONFIG_SCHED_MC */
  
 -#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
  static int
  cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
                  struct sched_group **sg, struct cpumask *mask)
  {
        int group;
 -
 +#ifdef CONFIG_SCHED_SMT
        cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
        group = cpumask_first(mask);
 +#else
 +      group = cpu;
 +#endif
        if (sg)
                *sg = &per_cpu(sched_group_core, group).sg;
        return group;
  }
 -#elif defined(CONFIG_SCHED_MC)
 +#endif /* CONFIG_SCHED_MC */
 +
 +/*
 + * book sched-domains:
 + */
 +#ifdef CONFIG_SCHED_BOOK
 +static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
 +static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
 +
  static int
 -cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 -                struct sched_group **sg, struct cpumask *unused)
 +cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
 +                struct sched_group **sg, struct cpumask *mask)
  {
 +      int group = cpu;
 +#ifdef CONFIG_SCHED_MC
 +      cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 +      group = cpumask_first(mask);
 +#elif defined(CONFIG_SCHED_SMT)
 +      cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 +      group = cpumask_first(mask);
 +#endif
        if (sg)
 -              *sg = &per_cpu(sched_group_core, cpu).sg;
 -      return cpu;
 +              *sg = &per_cpu(sched_group_book, group).sg;
 +      return group;
  }
 -#endif
 +#endif /* CONFIG_SCHED_BOOK */
  
  static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
  static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
@@@ -6611,10 -6594,7 +6611,10 @@@ cpu_to_phys_group(int cpu, const struc
                  struct sched_group **sg, struct cpumask *mask)
  {
        int group;
 -#ifdef CONFIG_SCHED_MC
 +#ifdef CONFIG_SCHED_BOOK
 +      cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
 +      group = cpumask_first(mask);
 +#elif defined(CONFIG_SCHED_MC)
        cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
        group = cpumask_first(mask);
  #elif defined(CONFIG_SCHED_SMT)
@@@ -6875,9 -6855,6 +6875,9 @@@ SD_INIT_FUNC(CPU
  #ifdef CONFIG_SCHED_MC
   SD_INIT_FUNC(MC)
  #endif
 +#ifdef CONFIG_SCHED_BOOK
 + SD_INIT_FUNC(BOOK)
 +#endif
  
  static int default_relax_domain_level = -1;
  
@@@ -6927,8 -6904,6 +6927,8 @@@ static void __free_domain_allocs(struc
                free_cpumask_var(d->tmpmask); /* fall through */
        case sa_send_covered:
                free_cpumask_var(d->send_covered); /* fall through */
 +      case sa_this_book_map:
 +              free_cpumask_var(d->this_book_map); /* fall through */
        case sa_this_core_map:
                free_cpumask_var(d->this_core_map); /* fall through */
        case sa_this_sibling_map:
@@@ -6975,10 -6950,8 +6975,10 @@@ static enum s_alloc __visit_domain_allo
                return sa_nodemask;
        if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
                return sa_this_sibling_map;
 -      if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
 +      if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
                return sa_this_core_map;
 +      if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
 +              return sa_this_book_map;
        if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
                return sa_send_covered;
        d->rd = alloc_rootdomain();
@@@ -7036,23 -7009,6 +7036,23 @@@ static struct sched_domain *__build_cpu
        return sd;
  }
  
 +static struct sched_domain *__build_book_sched_domain(struct s_data *d,
 +      const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 +      struct sched_domain *parent, int i)
 +{
 +      struct sched_domain *sd = parent;
 +#ifdef CONFIG_SCHED_BOOK
 +      sd = &per_cpu(book_domains, i).sd;
 +      SD_INIT(sd, BOOK);
 +      set_domain_attribute(sd, attr);
 +      cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
 +      sd->parent = parent;
 +      parent->child = sd;
 +      cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
 +#endif
 +      return sd;
 +}
 +
  static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
        const struct cpumask *cpu_map, struct sched_domain_attr *attr,
        struct sched_domain *parent, int i)
@@@ -7109,15 -7065,6 +7109,15 @@@ static void build_sched_groups(struct s
                                                &cpu_to_core_group,
                                                d->send_covered, d->tmpmask);
                break;
 +#endif
 +#ifdef CONFIG_SCHED_BOOK
 +      case SD_LV_BOOK: /* set up book groups */
 +              cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
 +              if (cpu == cpumask_first(d->this_book_map))
 +                      init_sched_build_groups(d->this_book_map, cpu_map,
 +                                              &cpu_to_book_group,
 +                                              d->send_covered, d->tmpmask);
 +              break;
  #endif
        case SD_LV_CPU: /* set up physical groups */
                cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
@@@ -7166,14 -7113,12 +7166,14 @@@ static int __build_sched_domains(const 
  
                sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
                sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
 +              sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
                sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
                sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
        }
  
        for_each_cpu(i, cpu_map) {
                build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
 +              build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
                build_sched_groups(&d, SD_LV_MC, cpu_map, i);
        }
  
                init_sched_groups_power(i, sd);
        }
  #endif
 +#ifdef CONFIG_SCHED_BOOK
 +      for_each_cpu(i, cpu_map) {
 +              sd = &per_cpu(book_domains, i).sd;
 +              init_sched_groups_power(i, sd);
 +      }
 +#endif
  
        for_each_cpu(i, cpu_map) {
                sd = &per_cpu(phys_domains, i).sd;
                sd = &per_cpu(cpu_domains, i).sd;
  #elif defined(CONFIG_SCHED_MC)
                sd = &per_cpu(core_domains, i).sd;
 +#elif defined(CONFIG_SCHED_BOOK)
 +              sd = &per_cpu(book_domains, i).sd;
  #else
                sd = &per_cpu(phys_domains, i).sd;
  #endif
diff --combined kernel/sched_fair.c
index aa16cf1eb8fea4ed6db0d34ba9e3e9d84d72c3be,db3f674ca49dbe93a611716b650bb8c715464da3..623e9aceef8f1bde4812525cb0bda5e6feb59270
@@@ -3031,14 -3031,7 +3031,14 @@@ redo
  
        if (!ld_moved) {
                schedstat_inc(sd, lb_failed[idle]);
 -              sd->nr_balance_failed++;
 +              /*
 +               * Increment the failure counter only on periodic balance.
 +               * We do not want newidle balance, which can be very
 +               * frequent, pollute the failure counter causing
 +               * excessive cache_hot migrations and active balances.
 +               */
 +              if (idle != CPU_NEWLY_IDLE)
 +                      sd->nr_balance_failed++;
  
                if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest),
                                        this_cpu)) {
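
Aside: per the comment in the hunk above, only periodic balance failures are counted now, so that very frequent newidle attempts do not inflate nr_balance_failed and cause the excessive cache-hot migrations and active balances the comment warns about. A minimal sketch of how the counting changes, with an assumed, purely illustrative threshold standing in for the scheduler's real check:

/*
 * Hypothetical sketch: count balance failures only for periodic balancing.
 * The enum, event sequence and threshold are illustrative values.
 */
#include <stdio.h>

enum cpu_idle_type { CPU_NOT_IDLE, CPU_IDLE, CPU_NEWLY_IDLE };

int main(void)
{
	/* A burst of failed newidle balances around one failed periodic one. */
	enum cpu_idle_type failed_attempts[] = {
		CPU_NEWLY_IDLE, CPU_NEWLY_IDLE, CPU_NEWLY_IDLE,
		CPU_NEWLY_IDLE, CPU_NOT_IDLE,
	};
	int threshold = 3;	/* illustrative "switch to active balance" point */
	int old_count = 0, new_count = 0;
	unsigned int i;

	for (i = 0; i < sizeof(failed_attempts) / sizeof(failed_attempts[0]); i++) {
		old_count++;				/* before: every failure counted */
		if (failed_attempts[i] != CPU_NEWLY_IDLE)
			new_count++;			/* after: periodic failures only */
	}

	printf("old counter: %d (%s threshold %d)\n", old_count,
	       old_count > threshold ? "exceeds" : "stays below", threshold);
	printf("new counter: %d (%s threshold %d)\n", new_count,
	       new_count > threshold ? "exceeds" : "stays below", threshold);
	return 0;
}
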
@@@ -3637,7 -3630,7 +3637,7 @@@ static inline int nohz_kick_needed(stru
        if (time_before(now, nohz.next_balance))
                return 0;
  
-       if (!rq->nr_running)
+       if (rq->idle_at_tick)
                return 0;
  
        first_pick_cpu = atomic_read(&nohz.first_pick_cpu);