git.karo-electronics.de Git - mv-sheeva.git/commitdiff
perf_counter: x86: More accurate counter update
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Wed, 13 May 2009 07:45:19 +0000 (09:45 +0200)
committerIngo Molnar <mingo@elte.hu>
Fri, 15 May 2009 07:46:54 +0000 (09:46 +0200)
Take the counter width into account instead of assuming 32 bits.

In particular, Nehalem has 44-bit wide counters, and all
arithmetic should happen on a 44-bit signed integer basis.

[ Impact: fix rare event imprecision, warning message on Nehalem ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_counter.c

index f7772ff7936edd5967c5c1962ba226717137c740..3a92a2b2a80f56aa18c7dc96e7add019540637aa 100644 (file)
@@ -138,7 +138,9 @@ static u64
 x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
 {
-       u64 prev_raw_count, new_raw_count, delta;
+       int shift = 64 - x86_pmu.counter_bits;
+       u64 prev_raw_count, new_raw_count;
+       s64 delta;
 
        /*
         * Careful: an NMI might modify the previous counter value.
@@ -161,9 +163,10 @@ again:
         * (counter-)time and add that to the generic counter.
         *
         * Careful, not all hw sign-extends above the physical width
-        * of the count, so we do that by clipping the delta to 32 bits:
+        * of the count.
         */
-       delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
+       delta = (new_raw_count << shift) - (prev_raw_count << shift);
+       delta >>= shift;
 
        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);