perf: Deconstify struct pmu

diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index babcceecd2eab6335217d2434c6e91e5c7fe5884..d7619b5e7a6e58f90451796ab424c8a47e746689 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -162,15 +162,15 @@ static void fsl_emb_pmu_read(struct perf_event *event)
         * Therefore we treat them like NMIs.
         */
        do {
-               prev = atomic64_read(&event->hw.prev_count);
+               prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
-       } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+       } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
-       atomic64_add(delta, &event->count);
-       atomic64_sub(delta, &event->hw.period_left);
+       local64_add(delta, &event->count);
+       local64_sub(delta, &event->hw.period_left);
 }
 
 /*
@@ -296,11 +296,11 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 
        val = 0;
        if (event->hw.sample_period) {
-               s64 left = atomic64_read(&event->hw.period_left);
+               s64 left = local64_read(&event->hw.period_left);
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }
-       atomic64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.prev_count, val);
        write_pmc(i, val);
        perf_event_update_userpage(event);
 
@@ -371,8 +371,8 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
        if (left < 0x80000000L)
                val = 0x80000000L - left;
        write_pmc(event->hw.idx, val);
-       atomic64_set(&event->hw.prev_count, val);
-       atomic64_set(&event->hw.period_left, left);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
        perf_enable();
        local_irq_restore(flags);
@@ -428,7 +428,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
        return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+struct pmu *hw_perf_event_init(struct perf_event *event)
 {
        u64 ev;
        struct perf_event *events[MAX_HWEVENTS];
@@ -500,7 +500,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                return ERR_PTR(-ENOTSUPP);
 
        event->hw.last_period = event->hw.sample_period;
-       atomic64_set(&event->hw.period_left, event->hw.last_period);
+       local64_set(&event->hw.period_left, event->hw.last_period);
 
        /*
         * See if we need to reserve the PMU.
@@ -541,16 +541,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        int record = 0;
 
        /* we don't have to worry about interrupts here */
-       prev = atomic64_read(&event->hw.prev_count);
+       prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
-       atomic64_add(delta, &event->count);
+       local64_add(delta, &event->count);
 
        /*
         * See if the total period for this event has expired,
         * and update for the next period.
         */
        val = 0;
-       left = atomic64_read(&event->hw.period_left) - delta;
+       left = local64_read(&event->hw.period_left) - delta;
        if (period) {
                if (left <= 0) {
                        left += period;
@@ -569,6 +569,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                struct perf_sample_data data;
 
                perf_sample_data_init(&data, 0);
+               data.period = event->hw.last_period;
 
                if (perf_event_overflow(event, nmi, &data, regs)) {
                        /*
@@ -584,8 +585,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        }
 
        write_pmc(event->hw.idx, val);
-       atomic64_set(&event->hw.prev_count, val);
-       atomic64_set(&event->hw.period_left, left);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
 }