ARM: 7354/1: perf: limit sample_period to half max_period in non-sampling mode
author Will Deacon <will.deacon@arm.com>
Tue, 6 Mar 2012 16:33:17 +0000 (17:33 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Wed, 7 Mar 2012 09:40:48 +0000 (09:40 +0000)
On ARM, the PMU does not stop counting after an overflow and therefore
IRQ latency affects the new counter value read by the kernel. This is
significant for non-sampling runs where it is possible for the new value
to overtake the previous one, causing the delta to be out by up to
max_period events.
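
To make the failure mode concrete, here is a minimal user-space sketch (not from the patch; the 32-bit counter mask and the specific values are illustrative assumptions) of how a wrapped, overtaking reading loses a whole period:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_period = 0xffffffffULL;	/* assumed 32-bit counter mask */
	uint64_t prev = 4;	/* counter value armed at the period boundary */
	uint64_t new = 10;	/* value read after a full wrap plus IRQ latency */

	/*
	 * The hardware really counted one full trip around the counter
	 * plus 6 events (0x100000006), but once new has overtaken prev
	 * the delta collapses to 6, losing an entire period.
	 */
	uint64_t delta = (new - prev) & max_period;
	printf("delta = %llu\n", (unsigned long long)delta);
	return 0;
}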

Commit a737823d ("ARM: 6835/1: perf: ensure overflows aren't missed due
to IRQ latency") attempted to fix this problem by allowing interrupt
handlers to pass an overflow flag to the event update function, causing
the overflow calculation to assume that the counter passed through zero
when going from prev to new. Unfortunately, this doesn't work when the
overflow occurs on the perf_task_tick path, because the flag is cleared
on that path and we end up computing a large negative delta.
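
For reference, the logic being removed looks roughly like the sketch below (a paraphrase of the lines deleted in the diff that follows, not a verbatim quote; the example values are hypothetical):

/*
 * Hedged sketch of the overflow-flag delta this patch removes.
 */
static uint64_t old_delta(uint64_t prev, uint64_t new,
			  uint64_t max_period, int overflow)
{
	prev &= max_period;
	new  &= max_period;

	if (overflow)	/* IRQ path: account for the pass through zero */
		return max_period - prev + new + 1;

	/*
	 * Non-IRQ paths such as perf_task_tick arrive here with
	 * overflow == 0. If the counter wrapped anyway (new < prev),
	 * this unsigned subtraction underflows, e.g.
	 * old_delta(0xfffffff0, 8, 0xffffffff, 0) == 0xffffffff00000018:
	 * the "large negative delta" described above.
	 */
	return new - prev;
}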

This patch removes the overflow flag from armpmu_event_update and
instead limits the sample_period to half of the max_period for
non-sampling profiling runs.
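
A minimal user-space sketch of why the two halves of the fix work together (assuming a 32-bit counter mask; not kernel code): masking the delta handles a single wrap on any read path, while arming only half the period leaves roughly 2^31 events of headroom before a reading could overtake prev:

#include <stdio.h>
#include <stdint.h>

/* The flag-free delta from this patch: correct for up to one wrap. */
static uint64_t event_delta(uint64_t prev, uint64_t new, uint64_t max_period)
{
	return (new - prev) & max_period;
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* assumed 32-bit counter */

	/*
	 * A wrapped reading on a non-IRQ path: 16 events up to and
	 * including the wrap, 8 beyond it. The masked delta reports 24
	 * without needing an overflow flag.
	 */
	printf("delta = %llu\n",
	       (unsigned long long)event_delta(0xfffffff0ULL, 8, mask));

	/*
	 * With sample_period = max_period >> 1, the counter is armed
	 * half-way up its range, so a latent IRQ would need to let
	 * roughly 2^31 extra events through before new could overtake
	 * prev and be misread.
	 */
	return 0;
}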

Cc: <stable@vger.kernel.org>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/include/asm/pmu.h
arch/arm/kernel/perf_event.c
arch/arm/kernel/perf_event_v6.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/perf_event_xscale.c

diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b5a5be2536c1158acf5a9c979eccb0f5588bb97c..90114faa9f3c7c087f6fce6b871b16ab5e433b44 100644
@@ -134,7 +134,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
 
 u64 armpmu_event_update(struct perf_event *event,
                        struct hw_perf_event *hwc,
-                       int idx, int overflow);
+                       int idx);
 
 int armpmu_event_set_period(struct perf_event *event,
                            struct hw_perf_event *hwc,
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47f24c9c6fb75324535c39e54b2c62e..56173ae164489cdd5d0a7f3fba2461c114a13cf6 100644
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
-                   int idx, int overflow)
+                   int idx)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
                             new_raw_count) != prev_raw_count)
                goto again;
 
-       new_raw_count &= armpmu->max_period;
-       prev_raw_count &= armpmu->max_period;
-
-       if (overflow)
-               delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-       else
-               delta = new_raw_count - prev_raw_count;
+       delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
        if (hwc->idx < 0)
                return;
 
-       armpmu_event_update(event, hwc, hwc->idx, 0);
+       armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(hwc, hwc->idx);
                barrier(); /* why? */
-               armpmu_event_update(event, hwc, hwc->idx, 0);
+               armpmu_event_update(event, hwc, hwc->idx);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
        hwc->config_base            |= (unsigned long)mapping;
 
        if (!hwc->sample_period) {
-               hwc->sample_period  = armpmu->max_period;
+               /*
+                * For non-sampling runs, limit the sample_period to half
+                * of the counter width. That way, the new counter value
+                * is far less likely to overtake the previous one unless
+                * you have some serious IRQ latency issues.
+                */
+               hwc->sample_period  = armpmu->max_period >> 1;
                hwc->last_period    = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be9930ec22f803e672a2e57217b8c65bfc40b..88bf15253fbd33a18074b995a8e3d54a0f8bbb4b 100644
@@ -524,7 +524,7 @@ armv6pmu_handle_irq(int irq_num,
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6933244c68f964ffed73f11cfbdaac1ca13b2f74..6f488610f8fcb634a69732f18420a22f4168c2cf 100644
@@ -963,7 +963,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d8269829b971db3d8f0618629c77d8317c5a..831e019b017c4513cf413af62a56d8f9a239b8b5 100644
@@ -259,7 +259,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
@@ -596,7 +596,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;