sched/core: Add missing update_rq_clock() call for task_hot()
author     Peter Zijlstra <peterz@infradead.org>
           Mon, 3 Oct 2016 14:35:32 +0000 (16:35 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Sat, 14 Jan 2017 10:29:34 +0000 (11:29 +0100)
Add the update_rq_clock() call at the top of the call stack instead of
at the bottom, where we find it missing; this aids the later effort to
minimize the number of update_rq_clock() calls.
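
For orientation, a minimal userspace sketch of this pattern follows; every
*_sketch name is a hypothetical stand-in rather than a kernel API, and the
real code uses rq->lock, update_rq_clock() and rq_clock_task(). The point it
illustrates: whoever takes the runqueue lock refreshes the cached clock once
at the top, so helpers running under that lock (task_hot() reached via
can_migrate_task() in the trace below) read a clock that is guaranteed to be
fresh.

  /*
   * Minimal userspace sketch of the "update the clock once, at the top"
   * pattern (all *_sketch names are hypothetical stand-ins, not kernel APIs).
   * Build with: cc -pthread sketch.c
   */
  #define _POSIX_C_SOURCE 200809L
  #include <pthread.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <time.h>

  struct rq_sketch {
          pthread_mutex_t lock;
          uint64_t        clock_ns;       /* cached clock, valid only under lock */
          int             clock_updated;  /* loosely mirrors rq->clock_update_flags */
  };

  /* Stand-in for update_rq_clock(): refresh the cached clock under the lock. */
  static void update_rq_clock_sketch(struct rq_sketch *rq)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          rq->clock_ns = (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
          rq->clock_updated = 1;
  }

  /* Stand-in for task_hot(): consumes the cached clock, warns if it is stale. */
  static int task_hot_sketch(const struct rq_sketch *rq, uint64_t last_ran_ns)
  {
          if (!rq->clock_updated)
                  fprintf(stderr, "WARNING: rq clock read before update\n");

          return rq->clock_ns - last_ran_ns < 500000;   /* arbitrary 0.5 ms window */
  }

  /* Stand-in for the load_balance() path: lock, update the clock once, migrate. */
  static void load_balance_sketch(struct rq_sketch *busiest, uint64_t last_ran_ns)
  {
          pthread_mutex_lock(&busiest->lock);
          update_rq_clock_sketch(busiest);      /* refresh once, at the top */

          /* Deeper helpers read the cached clock without refreshing it again. */
          if (task_hot_sketch(busiest, last_ran_ns))
                  puts("task is cache-hot, skip migration");
          else
                  puts("task is cold, migrate it");

          busiest->clock_updated = 0;           /* stale once the lock is dropped */
          pthread_mutex_unlock(&busiest->lock);
  }

  int main(void)
  {
          struct rq_sketch rq = { .clock_ns = 0, .clock_updated = 0 };

          pthread_mutex_init(&rq.lock, NULL);
          load_balance_sketch(&rq, 0);          /* last ran "long ago": not hot */
          pthread_mutex_destroy(&rq.lock);
          return 0;
  }

Skipping that top-of-stack refresh, or doing it only deep in the call chain,
is what trips the clock-update assertion shown in the warning below.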

  WARNING: CPU: 30 PID: 194 at ../kernel/sched/sched.h:797 assert_clock_updated()
  rq->clock_update_flags < RQCF_ACT_SKIP

  Call Trace:
    dump_stack()
    __warn()
    warn_slowpath_fmt()
    assert_clock_updated.isra.63.part.64()
    can_migrate_task()
    load_balance()
    pick_next_task_fair()
    __schedule()
    schedule()
    worker_thread()
    kthread()

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c

index 972b676229220119fb2ebcdf0a2503919da4ce84..b3bfe3fb4e1361474d6852896fefeb0e84236221 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8070,6 +8070,7 @@ redo:
 
 more_balance:
                raw_spin_lock_irqsave(&busiest->lock, flags);
+               update_rq_clock(busiest);
 
                /*
                 * cur_ld_moved - load moved in current iteration
@@ -8446,6 +8447,7 @@ static int active_load_balance_cpu_stop(void *data)
                };
 
                schedstat_inc(sd->alb_count);
+               update_rq_clock(busiest_rq);
 
                p = detach_one_task(&env);
                if (p) {