sched/core: Validate rq_clock*() serialization
author     Peter Zijlstra <peterz@infradead.org>
           Mon, 5 Jan 2015 10:18:10 +0000 (11:18 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Wed, 14 Jan 2015 12:34:19 +0000 (13:34 +0100)
rq->clock{,_task} are serialized by rq->lock; verify this.

One immediate failure is the usage in scale_rt_capacity(), so 'annotate'
that for now; there's more 'funny' there. Maybe change rq->lock into a
raw_seqlock_t?

(Only 32-bit is affected)

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20150105103554.361872747@infradead.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: umgwanakikbuti@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
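
A side note on the raw_seqlock_t question in the changelog: the same idea can be expressed with the existing seqcount API, letting lockless readers retry until they see a consistent rq->clock. The sketch below is purely hypothetical; rq_clock_lockless() and the rq->clock_seq member do not exist in this patch or in the tree, they only illustrate what such a reader could look like.

	/* Hypothetical sketch of the seqcount alternative floated in the
	 * changelog; rq->clock_seq is an invented member, not part of
	 * this patch.
	 */
	static inline u64 rq_clock_lockless(struct rq *rq)
	{
		unsigned int seq;
		u64 clock;

		do {
			/* the writer would bump this seqcount around every
			 * rq->clock update, still under rq->lock */
			seq = read_seqcount_begin(&rq->clock_seq);
			clock = rq->clock;
		} while (read_seqcount_retry(&rq->clock_seq, seq));

		return clock;
	}

The writer side would pair this with write_seqcount_begin()/write_seqcount_end() around the clock update while still holding rq->lock, so readers never block but retry across a concurrent update.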
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2a0b302e51def3f5a12ac09810d98010c06da3f6..50ff90289293a6c1da9fa672aa23ed7fe491b68c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5948,8 +5948,8 @@ static unsigned long scale_rt_capacity(int cpu)
         */
        age_stamp = ACCESS_ONCE(rq->age_stamp);
        avg = ACCESS_ONCE(rq->rt_avg);
+       delta = __rq_clock_broken(rq) - age_stamp;
 
-       delta = rq_clock(rq) - age_stamp;
        if (unlikely(delta < 0))
                delta = 0;
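
The '(Only 32-bit is affected)' remark above refers to the usual u64 problem: on a 64-bit machine the plain load of rq->clock is a single load, but on 32-bit it is split into two loads, so the unlocked read that __rq_clock_broken() performs can observe a half-updated value. A rough, self-contained sketch of that failure mode (illustration only, not kernel code):

	#include <stdint.h>

	/* On a 32-bit CPU, a 64-bit counter is read as two separate
	 * 32-bit loads; a writer running between them yields a torn value.
	 */
	union clock64 {
		uint64_t whole;
		uint32_t half[2];	/* [0] is the low word on little-endian */
	};

	static uint64_t torn_read(volatile union clock64 *c)
	{
		uint32_t lo = c->half[0];	/* writer may update 'whole' here ... */
		uint32_t hi = c->half[1];	/* ... leaving hi and lo inconsistent */

		return ((uint64_t)hi << 32) | lo;
	}

That risk is presumably why the changelog settles for 'annotating' this call with the __rq_clock_broken() name for now rather than fixing it.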
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a2a45c970e7dcbc0c146c027acc1bab713ff4ee..bd2373273a9e3b2b970bd9c47555586aad684e0f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -687,13 +687,20 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
 #define raw_rq()               raw_cpu_ptr(&runqueues)
 
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+       return ACCESS_ONCE(rq->clock);
+}
+
 static inline u64 rq_clock(struct rq *rq)
 {
+       lockdep_assert_held(&rq->lock);
        return rq->clock;
 }
 
 static inline u64 rq_clock_task(struct rq *rq)
 {
+       lockdep_assert_held(&rq->lock);
        return rq->clock_task;
 }
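
With these asserts in place, a lockdep-enabled kernel (CONFIG_LOCKDEP) will WARN whenever rq_clock() or rq_clock_task() is called without rq->lock held, which is exactly the serialization the changelog wants verified. A minimal sketch of the calling convention the assertions enforce; some_rq_user() below is a made-up function, not part of the patch:

	/* Illustration only: a caller that obeys the new locking rule. */
	static void some_rq_user(struct rq *rq)
	{
		unsigned long flags;
		u64 now;

		raw_spin_lock_irqsave(&rq->lock, flags);
		now = rq_clock(rq);		/* ok: rq->lock is held */
		raw_spin_unlock_irqrestore(&rq->lock, flags);

		/* Without the lock, only the explicitly 'broken' accessor is
		 * acceptable, and its value may be stale (or torn on 32-bit).
		 */
		now = __rq_clock_broken(rq);
		(void)now;
	}

Since rq->clock and rq->clock_task are only ever updated with rq->lock held, lockdep_assert_held() is the natural way to document and check that invariant at every read site.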