git.karo-electronics.de Git - mv-sheeva.git/blobdiff - kernel/sched.c
Merge branch 'linus' into sched/core
[mv-sheeva.git] / kernel / sched.c
index 7c9098d186e6f8e398c0cb9500c1efa2120293e2..168b2680ae27fa8f9bc2039602d1f6b8229e99de 100644 (file)
@@ -2614,9 +2614,32 @@ void sched_fork(struct task_struct *p, int clone_flags)
        set_task_cpu(p, cpu);
 
        /*
-        * Make sure we do not leak PI boosting priority to the child:
+        * Make sure we do not leak PI boosting priority to the child.
         */
        p->prio = current->normal_prio;
+
+       /*
+        * Revert to default priority/policy on fork if requested.
+        */
+       if (unlikely(p->sched_reset_on_fork)) {
+               if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+                       p->policy = SCHED_NORMAL;
+
+               if (p->normal_prio < DEFAULT_PRIO)
+                       p->prio = DEFAULT_PRIO;
+
+               if (PRIO_TO_NICE(p->static_prio) < 0) {
+                       p->static_prio = NICE_TO_PRIO(0);
+                       set_load_weight(p);
+               }
+
+               /*
+                * We don't need the reset flag anymore after the fork. It has
+                * fulfilled its duty:
+                */
+               p->sched_reset_on_fork = 0;
+       }
+
        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;
 
@@ -6100,17 +6123,25 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
        unsigned long flags;
        const struct sched_class *prev_class = p->sched_class;
        struct rq *rq;
+       int reset_on_fork;
 
        /* may grab non-irq protected spin_locks */
        BUG_ON(in_interrupt());
 recheck:
        /* double check policy once rq lock held */
-       if (policy < 0)
+       if (policy < 0) {
+               reset_on_fork = p->sched_reset_on_fork;
                policy = oldpolicy = p->policy;
-       else if (policy != SCHED_FIFO && policy != SCHED_RR &&
-                       policy != SCHED_NORMAL && policy != SCHED_BATCH &&
-                       policy != SCHED_IDLE)
-               return -EINVAL;
+       } else {
+               reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
+               policy &= ~SCHED_RESET_ON_FORK;
+
+               if (policy != SCHED_FIFO && policy != SCHED_RR &&
+                               policy != SCHED_NORMAL && policy != SCHED_BATCH &&
+                               policy != SCHED_IDLE)
+                       return -EINVAL;
+       }
+
        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are
         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
@@ -6154,6 +6185,10 @@ recheck:
                /* can't change other user's priorities */
                if (!check_same_owner(p))
                        return -EPERM;
+
+               /* Normal users shall not reset the sched_reset_on_fork flag */
+               if (p->sched_reset_on_fork && !reset_on_fork)
+                       return -EPERM;
        }
 
        if (user) {
@@ -6197,6 +6232,8 @@ recheck:
        if (running)
                p->sched_class->put_prev_task(rq, p);
 
+       p->sched_reset_on_fork = reset_on_fork;
+
        oldprio = p->prio;
        __setscheduler(rq, p, policy, param->sched_priority);
 
@@ -6313,14 +6350,15 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
        if (p) {
                retval = security_task_getscheduler(p);
                if (!retval)
-                       retval = p->policy;
+                       retval = p->policy
+                               | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
        }
        read_unlock(&tasklist_lock);
        return retval;
 }
 
 /**
- * sys_sched_getscheduler - get the RT priority of a thread
+ * sys_sched_getparam - get the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
  */