Merge branch 'linus' into x86/mm
diff --git a/kernel/sched.c b/kernel/sched.c
index 65c02037b0524637bcad1e1d4ee063fd17172b43..8ee437a5ec1d5186bc411b146a513280533ff998 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1323,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  * slice expiry etc.
  */
 
-#define WEIGHT_IDLEPRIO                2
-#define WMULT_IDLEPRIO         (1 << 31)
+#define WEIGHT_IDLEPRIO                3
+#define WMULT_IDLEPRIO         1431655765
 
 /*
  * Nice levels are multiplicative, with a gentle 10% change for every
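The WMULT_* constants cache the reciprocal 2^32 / weight so the scheduler can replace a division by a task's load weight with a multiply and a shift; 1431655765 is 2^32 / 3, which keeps the new pair consistent (the old pair was 2 and 2^31). A minimal user-space sketch of that arithmetic, assuming the nice-0 weight of 1024 and a hypothetical inv_weight() helper (the kernel's calc_delta_mine() additionally pre-scales to avoid 64-bit overflow):

#include <stdint.h>
#include <stdio.h>

#define WMULT_SHIFT 32

/* Cached reciprocal, 2^32 / weight, the way the WMULT_* constants
 * are derived. */
static uint32_t inv_weight(uint32_t weight)
{
        return (uint32_t)((1ULL << WMULT_SHIFT) / weight);
}

int main(void)
{
        /* 2^32 / 3 == 1431655765, matching the new WMULT_IDLEPRIO. */
        printf("inv_weight(3) = %u\n", (unsigned)inv_weight(3));

        /* Weight a 1 ms delta for an idle task (weight 3) against the
         * nice-0 weight of 1024: multiply-and-shift, no division. */
        uint64_t delta_ns = 1000000ULL;
        uint64_t scaled = (delta_ns * 1024 * inv_weight(3)) >> WMULT_SHIFT;
        printf("weighted delta = %llu ns\n", (unsigned long long)scaled);
        return 0;
}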
@@ -2266,6 +2266,16 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
        if (!sched_feat(SYNC_WAKEUPS))
                sync = 0;
 
+       if (!sync) {
+               if (current->se.avg_overlap < sysctl_sched_migration_cost &&
+                         p->se.avg_overlap < sysctl_sched_migration_cost)
+                       sync = 1;
+       } else {
+               if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
+                         p->se.avg_overlap >= sysctl_sched_migration_cost)
+                       sync = 0;
+       }
+
 #ifdef CONFIG_SMP
        if (sched_feat(LB_WAKEUP_UPDATE)) {
                struct sched_domain *sd;
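The added block overrides the caller's sync hint from the tasks' measured wakeup overlap. Note that the two branches collapse to a single predicate, so the hint is effectively recomputed; a stand-alone sketch of that condensed reading (not kernel code):

#include <stdbool.h>

/* Both branches of the hunk above reduce to this: treat the wakeup as
 * synchronous iff both waker and wakee historically overlap for less
 * than the migration cost. */
static bool effective_sync(unsigned long waker_avg_overlap,
                           unsigned long wakee_avg_overlap,
                           unsigned long migration_cost)
{
        return waker_avg_overlap < migration_cost &&
               wakee_avg_overlap < migration_cost;
}

Tasks that run only briefly after waking each other tend to be pipe-style producer/consumer pairs, for which an affine (sync) wakeup is cheaper than spreading them across CPUs.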
@@ -4440,7 +4450,7 @@ void __kprobes sub_preempt_count(int val)
        /*
         * Underflow?
         */
-       if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+       if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
                return;
        /*
         * Is the spinlock portion underflowing?
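Dropping the (!!kernel_locked()) term means the big kernel lock no longer gets a special exemption in the underflow check: the value being subtracted simply may not exceed the current count. A toy user-space model of that nesting counter, with hypothetical *_model names:

#include <stdio.h>

static int preempt_count;      /* models the per-thread nesting counter */

static void preempt_disable_model(void)
{
        preempt_count += 1;
}

static void preempt_enable_model(void)
{
        /* The simplified underflow check from the hunk: never subtract
         * more than was previously added. */
        if (1 > preempt_count) {
                fprintf(stderr, "preempt count underflow\n");
                return;
        }
        preempt_count -= 1;
}

int main(void)
{
        preempt_disable_model();
        preempt_enable_model();
        preempt_enable_model();        /* triggers the underflow warning */
        return 0;
}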
@@ -4687,8 +4697,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-                            int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+                       int nr_exclusive, int sync, void *key)
 {
        wait_queue_t *curr, *next;
 
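This hunk only drops the static qualifier so other files can call __wake_up_common(); the behavior the comment describes is unchanged. A sketch of that exclusive-wakeup scan, with hypothetical types (waiter, try_wake) standing in for wait_queue_t and its ->func callback:

/* Scan the queue, waking waiters until nr_exclusive exclusive wakeups
 * have actually succeeded. */
struct waiter {
        int exclusive;                         /* WQ_FLAG_EXCLUSIVE analogue */
        int (*try_wake)(struct waiter *w);     /* nonzero on success */
        struct waiter *next;
};

static void wake_up_common_sketch(struct waiter *head, int nr_exclusive)
{
        struct waiter *w, *next;

        for (w = head; w; w = next) {
                next = w->next;        /* the callback may free w */
                /* A failed wakeup (task not yet TASK_RUNNING) does not
                 * consume an exclusive slot; the scan just continues. */
                if (w->try_wake(w) && w->exclusive && !--nr_exclusive)
                        break;
        }
}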
@@ -5869,7 +5879,7 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
  */
-SYSCALL_DEFINE4(sched_rr_get_interval, pid_t, pid,
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                struct timespec __user *, interval)
 {
        struct task_struct *p;
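The SYSCALL_DEFINEn suffix must match the number of syscall arguments, and sched_rr_get_interval takes exactly two (the pid and the timespec pointer), so DEFINE4 was wrong. From user space the call goes through the standard glibc wrapper:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* pid 0 queries the calling process. */
        if (sched_rr_get_interval(0, &ts) != 0) {
                perror("sched_rr_get_interval");
                return 1;
        }
        printf("round-robin timeslice: %ld.%09ld s (0 means infinity)\n",
               (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}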
@@ -9050,6 +9060,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
                runtime = d->rt_runtime;
        }
 
+#ifdef CONFIG_USER_SCHED
+       if (tg == &root_task_group) {
+               period = global_rt_period();
+               runtime = global_rt_runtime();
+       }
+#endif
+
        /*
         * Cannot have more runtime than the period.
         */
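With CONFIG_USER_SCHED, the root task group's limits now come from the global sysctl knobs rather than per-group values, and the check that follows enforces the basic bandwidth constraint. A minimal sketch of that constraint, assuming a RUNTIME_INF sentinel mirroring the kernel's "unlimited" value:

#include <stdbool.h>

#define RUNTIME_INF ((unsigned long long)~0ULL)    /* assumed sentinel */

/* A group may not be promised more RT runtime per period than the
 * period itself; an infinite runtime is exempt. */
static bool rt_bandwidth_ok(unsigned long long runtime,
                            unsigned long long period)
{
        return runtime == RUNTIME_INF || runtime <= period;
}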