perf: Ignore non-sampling overflows
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 517d827f498281da50210aa76a02b4693116154a..af1e63f249f38ae822643c945e5cf09f8396d6a6 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -674,6 +675,8 @@ event_sched_in(struct perf_event *event,
 
        event->tstamp_running += ctx->time - event->tstamp_stopped;
 
+       event->shadow_ctx_time = ctx->time - ctx->timestamp;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        ctx->nr_active++;
@@ -1070,7 +1073,7 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
        /*
         * not supported on inherited events
         */
-       if (event->attr.inherit)
+       if (event->attr.inherit || !is_sampling_event(event))
                return -EINVAL;
 
        atomic_add(refresh, &event->event_limit);
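Several hunks in this diff replace open-coded tests of attr.sample_period or hwc->sample_period with is_sampling_event(). As a reference point, here is a minimal sketch of what that helper is assumed to look like (it relies on struct perf_event from <linux/perf_event.h>; treat the exact name and location as an assumption rather than something this diff shows): an event counts as a sampling event iff it was created with a non-zero sample period.

/*
 * Assumed shape of the helper used throughout this diff: sampling
 * events are those with a non-zero sample_period (sample_period
 * shares storage with sample_freq; attr.freq selects which
 * interpretation applies).
 */
static inline int is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}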
@@ -1284,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 {
        int ctxn;
 
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
        for_each_task_context_nr(ctxn)
                perf_event_context_sched_out(task, ctxn, next);
 }
@@ -1619,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
        raw_spin_lock(&ctx->lock);
 
-       /* Rotate the first entry last of non-pinned groups */
-       list_rotate_left(&ctx->flexible_groups);
+       /*
+        * Rotate the first entry last of non-pinned groups. Rotation might be
+        * disabled by the inheritance code.
+        */
+       if (!ctx->rotate_disable)
+               list_rotate_left(&ctx->flexible_groups);
 
        raw_spin_unlock(&ctx->lock);
 }
@@ -2232,11 +2237,6 @@ int perf_event_release_kernel(struct perf_event *event)
        raw_spin_unlock_irq(&ctx->lock);
        mutex_unlock(&ctx->mutex);
 
-       mutex_lock(&event->owner->perf_event_mutex);
-       list_del_init(&event->owner_entry);
-       mutex_unlock(&event->owner->perf_event_mutex);
-       put_task_struct(event->owner);
-
        free_event(event);
 
        return 0;
@@ -2249,9 +2249,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
        struct perf_event *event = file->private_data;
+       struct task_struct *owner;
 
        file->private_data = NULL;
 
+       rcu_read_lock();
+       owner = ACCESS_ONCE(event->owner);
+       /*
+        * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+        * !owner it means the list deletion is complete and we can indeed
+        * free this event; otherwise we need to serialize on
+        * owner->perf_event_mutex.
+        */
+       smp_read_barrier_depends();
+       if (owner) {
+               /*
+                * Since delayed_put_task_struct() also drops the last
+                * task reference, we can safely take a new reference
+                * while holding the rcu_read_lock().
+                */
+               get_task_struct(owner);
+       }
+       rcu_read_unlock();
+
+       if (owner) {
+               mutex_lock(&owner->perf_event_mutex);
+               /*
+                * We have to re-check the event->owner field: if it is cleared
+                * we raced with perf_event_exit_task(); acquiring the mutex
+                * ensured they're done, and we can proceed with freeing the
+                * event.
+                */
+               if (event->owner)
+                       list_del_init(&event->owner_entry);
+               mutex_unlock(&owner->perf_event_mutex);
+               put_task_struct(owner);
+       }
+
        return perf_event_release_kernel(event);
 }
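The hand-off between this function and perf_event_exit_task() (see the hunk near the end of this diff) is a small publish/observe protocol: the exiting task unlinks owner_entry, issues smp_wmb(), and only then clears event->owner; the closing side reads event->owner, pairs that read with smp_read_barrier_depends(), and if it sees an owner it serializes on owner->perf_event_mutex and re-checks before touching the list. Below is a compile-only user-space sketch of the same pattern, using C11 atomics in place of the kernel barriers and leaving out the RCU/task-refcount details; all names in it are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct owner {
	pthread_mutex_t perf_event_mutex;	/* protects the owner list   */
};

struct event {
	_Atomic(struct owner *) owner;		/* plays event->owner        */
	bool on_owner_list;			/* stands in for owner_entry */
};

/* perf_event_exit_task() side: unlink first, then publish owner == NULL. */
static void exit_task_side(struct event *ev, struct owner *o)
{
	pthread_mutex_lock(&o->perf_event_mutex);
	ev->on_owner_list = false;		/* list_del_init()           */
	/* release ordering plays the role of smp_wmb() */
	atomic_store_explicit(&ev->owner, NULL, memory_order_release);
	pthread_mutex_unlock(&o->perf_event_mutex);
}

/* perf_release() side: observe the owner, then serialize and re-check. */
static void release_side(struct event *ev)
{
	/* acquire ordering plays the role of smp_read_barrier_depends() */
	struct owner *o = atomic_load_explicit(&ev->owner, memory_order_acquire);

	if (o) {
		pthread_mutex_lock(&o->perf_event_mutex);
		/* owner may have been cleared while we waited for the mutex */
		if (atomic_load_explicit(&ev->owner, memory_order_relaxed))
			ev->on_owner_list = false;
		pthread_mutex_unlock(&o->perf_event_mutex);
	}
	/* free_event() would follow here */
}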
 
@@ -2480,7 +2514,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
        int ret = 0;
        u64 value;
 
-       if (!event->attr.sample_period)
+       if (!is_sampling_event(event))
                return -EINVAL;
 
        if (copy_from_user(&value, arg, sizeof(value)))
@@ -3396,7 +3430,8 @@ static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
 }
 
 static void perf_output_read_one(struct perf_output_handle *handle,
-                                struct perf_event *event)
+                                struct perf_event *event,
+                                u64 enabled, u64 running)
 {
        u64 read_format = event->attr.read_format;
        u64 values[4];
@@ -3404,11 +3439,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
 
        values[n++] = perf_event_count(event);
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-               values[n++] = event->total_time_enabled +
+               values[n++] = enabled +
                        atomic64_read(&event->child_total_time_enabled);
        }
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-               values[n++] = event->total_time_running +
+               values[n++] = running +
                        atomic64_read(&event->child_total_time_running);
        }
        if (read_format & PERF_FORMAT_ID)
@@ -3421,7 +3456,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
  */
 static void perf_output_read_group(struct perf_output_handle *handle,
-                           struct perf_event *event)
+                           struct perf_event *event,
+                           u64 enabled, u64 running)
 {
        struct perf_event *leader = event->group_leader, *sub;
        u64 read_format = event->attr.read_format;
@@ -3431,10 +3467,10 @@ static void perf_output_read_group(struct perf_output_handle *handle,
        values[n++] = 1 + leader->nr_siblings;
 
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-               values[n++] = leader->total_time_enabled;
+               values[n++] = enabled;
 
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-               values[n++] = leader->total_time_running;
+               values[n++] = running;
 
        if (leader != event)
                leader->pmu->read(leader);
@@ -3459,13 +3495,35 @@ static void perf_output_read_group(struct perf_output_handle *handle,
        }
 }
 
+#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
+                                PERF_FORMAT_TOTAL_TIME_RUNNING)
+
 static void perf_output_read(struct perf_output_handle *handle,
                             struct perf_event *event)
 {
+       u64 enabled = 0, running = 0, now, ctx_time;
+       u64 read_format = event->attr.read_format;
+
+       /*
+        * Compute total_time_enabled and total_time_running
+        * based on snapshot values taken when the event
+        * was last scheduled in.
+        *
+        * We cannot simply call update_context_time()
+        * because of locking issues, as we are called in
+        * NMI context.
+        */
+       if (read_format & PERF_FORMAT_TOTAL_TIMES) {
+               now = perf_clock();
+               ctx_time = event->shadow_ctx_time + now;
+               enabled = ctx_time - event->tstamp_enabled;
+               running = ctx_time - event->tstamp_running;
+       }
+
        if (event->attr.read_format & PERF_FORMAT_GROUP)
-               perf_output_read_group(handle, event);
+               perf_output_read_group(handle, event, enabled, running);
        else
-               perf_output_read_one(handle, event);
+               perf_output_read_one(handle, event, enabled, running);
 }
 
 void perf_output_sample(struct perf_output_handle *handle,
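The lock-free reconstruction above works because of the shadow_ctx_time snapshot taken in event_sched_in() earlier in this diff: ctx->time is the context's accumulated time and ctx->timestamp is the perf_clock() value at which it was last advanced, so shadow_ctx_time = ctx->time - ctx->timestamp plus the current perf_clock() yields an up-to-date context time while the context is running, without taking ctx->lock. A small user-space sketch of just that arithmetic (the names and the compute_times() helper are illustrative, not a kernel API):

#include <stdint.h>

struct evt_times {
	uint64_t shadow_ctx_time;	/* ctx->time - ctx->timestamp at sched-in     */
	uint64_t tstamp_enabled;	/* context time when the event was enabled    */
	uint64_t tstamp_running;	/* context time when it last started running  */
};

/* "now" stands in for perf_clock(); mirrors the arithmetic in perf_output_read(). */
static void compute_times(const struct evt_times *t, uint64_t now,
			  uint64_t *enabled, uint64_t *running)
{
	uint64_t ctx_time = t->shadow_ctx_time + now;

	*enabled = ctx_time - t->tstamp_enabled;
	*running = ctx_time - t->tstamp_running;
}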
@@ -4182,6 +4240,13 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
        struct hw_perf_event *hwc = &event->hw;
        int ret = 0;
 
+       /*
+        * Non-sampling counters might still use the PMI to fold short
+        * hardware counters; ignore those.
+        */
+       if (unlikely(!is_sampling_event(event)))
+               return 0;
+
        if (!throttle) {
                hwc->interrupts++;
        } else {
@@ -4327,7 +4392,7 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
        if (!regs)
                return;
 
-       if (!hwc->sample_period)
+       if (!is_sampling_event(event))
                return;
 
        if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
@@ -4490,7 +4555,7 @@ static int perf_swevent_add(struct perf_event *event, int flags)
        struct hw_perf_event *hwc = &event->hw;
        struct hlist_head *head;
 
-       if (hwc->sample_period) {
+       if (is_sampling_event(event)) {
                hwc->last_period = hwc->sample_period;
                perf_swevent_set_period(event);
        }
@@ -4747,15 +4812,6 @@ static int perf_tp_event_init(struct perf_event *event)
        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                return -ENOENT;
 
-       /*
-        * Raw tracepoint data is a severe data leak, only allow root to
-        * have these.
-        */
-       if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
-                       perf_paranoid_tracepoint_raw() &&
-                       !capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
        err = perf_trace_init(event);
        if (err)
                return err;
@@ -4868,31 +4924,33 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 static void perf_swevent_start_hrtimer(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
+       s64 period;
+
+       if (!is_sampling_event(event))
+               return;
 
        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hwc->hrtimer.function = perf_swevent_hrtimer;
-       if (hwc->sample_period) {
-               s64 period = local64_read(&hwc->period_left);
 
-               if (period) {
-                       if (period < 0)
-                               period = 10000;
+       period = local64_read(&hwc->period_left);
+       if (period) {
+               if (period < 0)
+                       period = 10000;
 
-                       local64_set(&hwc->period_left, 0);
-               } else {
-                       period = max_t(u64, 10000, hwc->sample_period);
-               }
-               __hrtimer_start_range_ns(&hwc->hrtimer,
+               local64_set(&hwc->period_left, 0);
+       } else {
+               period = max_t(u64, 10000, hwc->sample_period);
+       }
+       __hrtimer_start_range_ns(&hwc->hrtimer,
                                ns_to_ktime(period), 0,
                                HRTIMER_MODE_REL_PINNED, 0);
-       }
 }
 
 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
 
-       if (hwc->sample_period) {
+       if (is_sampling_event(event)) {
                ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
                local64_set(&hwc->period_left, ktime_to_ns(remaining));
 
@@ -5651,7 +5709,7 @@ SYSCALL_DEFINE5(perf_event_open,
        mutex_unlock(&ctx->mutex);
 
        event->owner = current;
-       get_task_struct(current);
+
        mutex_lock(&current->perf_event_mutex);
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);
@@ -5719,12 +5777,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);
 
-       event->owner = current;
-       get_task_struct(current);
-       mutex_lock(&current->perf_event_mutex);
-       list_add_tail(&event->owner_entry, &current->perf_event_list);
-       mutex_unlock(&current->perf_event_mutex);
-
        return event;
 
 err_free:
@@ -5875,8 +5927,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+       struct perf_event *event, *tmp;
        int ctxn;
 
+       mutex_lock(&child->perf_event_mutex);
+       list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+                                owner_entry) {
+               list_del_init(&event->owner_entry);
+
+               /*
+                * Ensure the list deletion is visible before we clear
+                * the owner; this closes a race against perf_release(),
+                * where we need to serialize on the owner->perf_event_mutex.
+                */
+               smp_wmb();
+               event->owner = NULL;
+       }
+       mutex_unlock(&child->perf_event_mutex);
+
        for_each_task_context_nr(ctxn)
                perf_event_exit_task_context(child, ctxn);
 }
@@ -6096,6 +6164,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
        struct perf_event *event;
        struct task_struct *parent = current;
        int inherited_all = 1;
+       unsigned long flags;
        int ret = 0;
 
        child->perf_event_ctxp[ctxn] = NULL;
@@ -6136,6 +6205,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                        break;
        }
 
+       /*
+        * We can't hold ctx->lock when iterating the ->flexible_groups list due
+        * to allocations, but we need to prevent rotation because
+        * rotate_ctx() will change the list from interrupt context.
+        */
+       raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+       parent_ctx->rotate_disable = 1;
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
@@ -6143,6 +6221,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                        break;
        }
 
+       raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+       parent_ctx->rotate_disable = 0;
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
        child_ctx = child->perf_event_ctxp[ctxn];
 
        if (child_ctx && inherited_all) {
@@ -6295,6 +6377,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+       int ret;
+
        perf_event_init_all_cpus();
        init_srcu_struct(&pmus_srcu);
        perf_pmu_register(&perf_swevent);
@@ -6302,4 +6386,7 @@ void __init perf_event_init(void)
        perf_pmu_register(&perf_task_clock);
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
+
+       ret = init_hw_breakpoint();
+       WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }