diff --git a/kernel/sched.c b/kernel/sched.c
index c3c718aea618c505247631ccde8ccd1c0c31d3b0..74f169ac0773cd2ac3eeac0bfbb3c3a631d70c65 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1755,27 +1755,27 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
        __releases(rq->lock)
 {
        struct mm_struct *mm = rq->prev_mm;
-       unsigned long prev_task_flags;
+       long prev_state;
 
        rq->prev_mm = NULL;
 
        /*
         * A task struct has one reference for the use as "current".
-        * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
-        * calls schedule one last time. The schedule call will never return,
-        * and the scheduled task must drop that reference.
-        * The test for EXIT_ZOMBIE must occur while the runqueue locks are
+        * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+        * schedule one last time. The schedule call will never return, and
+        * the scheduled task must drop that reference.
+        * The test for TASK_DEAD must occur while the runqueue locks are
         * still held, otherwise prev could be scheduled on another cpu, die
         * there before we look at prev->state, and then the reference would
         * be dropped twice.
         *              Manfred Spraul <manfred@colorfullife.com>
         */
-       prev_task_flags = prev->flags;
+       prev_state = prev->state;
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
        if (mm)
                mmdrop(mm);
-       if (unlikely(prev_task_flags & PF_DEAD)) {
+       if (unlikely(prev_state == TASK_DEAD)) {
                /*
                 * Remove function-return probe instances associated with this
                 * task and put them back on the free list.
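The hunk above keys the final reference drop off a copy of prev->state taken while rq->lock is still held, instead of re-testing a task flag after the lock may already be gone. The user-space sketch below is only an analogue of that pattern, not kernel code (the struct, the TASK_DEAD value and put_task() are made up for illustration): the state is sampled under the lock, and after the unlock only the saved copy decides whether the last reference is dropped, so a task that dies on another CPU in the meantime cannot cause a double put.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

#define TASK_DEAD 64                    /* arbitrary value for this sketch */

struct task {
        pthread_mutex_t lock;           /* stands in for rq->lock          */
        long state;
        atomic_int refs;
};

static void put_task(struct task *p)
{
        if (atomic_fetch_sub(&p->refs, 1) == 1)
                free(p);
}

/* Called with p->lock held, mirroring finish_task_switch(). */
static void finish_switch(struct task *prev)
{
        long prev_state = prev->state;  /* sample while the lock is held   */

        pthread_mutex_unlock(&prev->lock);

        /*
         * Decide from the copy: if prev->state were re-read here, prev
         * could meanwhile die on another CPU and drop its own reference,
         * and a second sighting of TASK_DEAD would make us drop it twice.
         */
        if (prev_state == TASK_DEAD)
                put_task(prev);
}

int main(void)
{
        struct task *p = calloc(1, sizeof(*p));

        pthread_mutex_init(&p->lock, NULL);
        atomic_init(&p->refs, 1);
        p->state = TASK_DEAD;           /* the task has already "exited"   */

        pthread_mutex_lock(&p->lock);
        finish_switch(p);               /* drops the last reference        */
        return 0;
}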
@@ -3348,9 +3348,6 @@ need_resched_nonpreemptible:
 
        spin_lock_irq(&rq->lock);
 
-       if (unlikely(prev->flags & PF_DEAD))
-               prev->state = EXIT_DEAD;
-
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                switch_count = &prev->nvcsw;
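With PF_DEAD gone, schedule() no longer needs to translate a task flag into an exit state here; the exit path marks the task itself before its final schedule(). kernel/exit.c is not part of this diff, but the tail of do_exit() after this change presumably reads roughly like the fragment below (a hedged sketch, not taken from this patch):

        preempt_disable();
        /* triggers the final put_task_struct() in finish_task_switch() */
        tsk->state = TASK_DEAD;
        schedule();
        BUG();          /* the last schedule() of a dead task never returns */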
@@ -4116,27 +4113,25 @@ recheck:
         * Allow unprivileged RT tasks to decrease priority:
         */
        if (!capable(CAP_SYS_NICE)) {
-               unsigned long rlim_rtprio;
-               unsigned long flags;
-
-               if (!lock_task_sighand(p, &flags))
-                       return -ESRCH;
-               rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
-               unlock_task_sighand(p, &flags);
+               if (is_rt_policy(policy)) {
+                       unsigned long rlim_rtprio;
+                       unsigned long flags;
+
+                       if (!lock_task_sighand(p, &flags))
+                               return -ESRCH;
+                       rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
+                       unlock_task_sighand(p, &flags);
+
+                       /* can't set/change the rt policy */
+                       if (policy != p->policy && !rlim_rtprio)
+                               return -EPERM;
+
+                       /* can't increase priority */
+                       if (param->sched_priority > p->rt_priority &&
+                           param->sched_priority > rlim_rtprio)
+                               return -EPERM;
+               }
 
-               /*
-                * can't change policy, except between SCHED_NORMAL
-                * and SCHED_BATCH:
-                */
-               if (((policy != SCHED_NORMAL && p->policy != SCHED_BATCH) &&
-                       (policy != SCHED_BATCH && p->policy != SCHED_NORMAL)) &&
-                               !rlim_rtprio)
-                       return -EPERM;
-               /* can't increase priority */
-               if (is_rt_policy(policy) &&
-                   param->sched_priority > p->rt_priority &&
-                   param->sched_priority > rlim_rtprio)
-                       return -EPERM;
                /* can't change other user's priorities */
                if ((current->euid != p->euid) &&
                    (current->euid != p->uid))
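The rewritten block consults RLIMIT_RTPRIO only when an unprivileged caller actually requests a realtime policy, and then rejects both switching to an RT policy with a zero limit and raising the priority beyond the limit. A small user-space probe of that behaviour might look like the sketch below (illustrative only; run it without CAP_SYS_NICE, and note the priority value 10 is arbitrary):

#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;
        struct sched_param sp = { .sched_priority = 10 };

        if (getrlimit(RLIMIT_RTPRIO, &rl) == 0)
                printf("RLIMIT_RTPRIO soft limit: %llu\n",
                       (unsigned long long)rl.rlim_cur);

        /*
         * Without CAP_SYS_NICE this succeeds only if the requested
         * priority stays within RLIMIT_RTPRIO (or does not exceed the
         * task's current rt_priority); otherwise the kernel returns
         * EPERM, which is what the checks in the hunk above enforce.
         */
        if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
                printf("SCHED_FIFO prio 10 rejected: %s\n", strerror(errno));
        else
                printf("SCHED_FIFO prio 10 granted\n");

        return 0;
}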
@@ -5158,7 +5153,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
        BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
 
        /* Cannot have done final schedule yet: would have vanished. */
-       BUG_ON(p->flags & PF_DEAD);
+       BUG_ON(p->state == TASK_DEAD);
 
        get_task_struct(p);