perf: Simplify __perf_event_sync_stat
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 7f29643c898549a5e523d07479d581edc4e870f5..af150bbcfc5b63ded3322cb22a949a74a1bcd7d7 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -28,6 +28,8 @@
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
+#include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -1059,8 +1061,6 @@ static int context_equiv(struct perf_event_context *ctx1,
                && !ctx1->pin_count && !ctx2->pin_count;
 }
 
-static void __perf_event_read(void *event);
-
 static void __perf_event_sync_stat(struct perf_event *event,
                                     struct perf_event *next_event)
 {
@@ -1078,8 +1078,8 @@ static void __perf_event_sync_stat(struct perf_event *event,
         */
        switch (event->state) {
        case PERF_EVENT_STATE_ACTIVE:
-               __perf_event_read(event);
-               break;
+               event->pmu->read(event);
+               /* fall-through */
 
        case PERF_EVENT_STATE_INACTIVE:
                update_event_times(event);
@@ -1118,6 +1118,8 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
        if (!ctx->nr_stat)
                return;
 
+       update_context_time(ctx);
+
        event = list_first_entry(&ctx->event_list,
                                   struct perf_event, event_entry);
 
@@ -1161,8 +1163,6 @@ void perf_event_task_sched_out(struct task_struct *task,
        if (likely(!ctx || !cpuctx->task_ctx))
                return;
 
-       update_context_time(ctx);
-
        rcu_read_lock();
        parent = rcu_dereference(ctx->parent_ctx);
        next_ctx = next->perf_event_ctxp;
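
These three hunks belong together: update_context_time() moves from
perf_event_task_sched_out() into perf_event_sync_stat(), so the context
clock is brought up to date exactly where the stats get synced, and an
ACTIVE event can read its PMU directly and fall through into
update_event_times() instead of bouncing through __perf_event_read().
Resulting call order on context switch (sketch, flow taken from the
hunks above):

	perf_event_task_sched_out()
	  perf_event_sync_stat(ctx, next_ctx)
	    update_context_time(ctx);		/* moved here */
	    __perf_event_sync_stat(event, next_event)
	      event->pmu->read(event);		/* ACTIVE only, then... */
	      update_event_times(event);	/* ...falls through */
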
@@ -1515,7 +1515,6 @@ static void __perf_event_read(void *info)
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
-       unsigned long flags;
 
        /*
         * If this is a task context, we need to check whether it is
@@ -1527,12 +1526,10 @@ static void __perf_event_read(void *info)
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;
 
-       local_irq_save(flags);
        if (ctx->is_active)
                update_context_time(ctx);
        event->pmu->read(event);
        update_event_times(event);
-       local_irq_restore(flags);
 }
 
 static u64 perf_event_read(struct perf_event *event)
@@ -1658,6 +1655,8 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
        return ERR_PTR(err);
 }
 
+static void perf_event_free_filter(struct perf_event *event);
+
 static void free_event_rcu(struct rcu_head *head)
 {
        struct perf_event *event;
@@ -1665,6 +1664,7 @@ static void free_event_rcu(struct rcu_head *head)
        event = container_of(head, struct perf_event, rcu_head);
        if (event->ns)
                put_pid_ns(event->ns);
+       perf_event_free_filter(event);
        kfree(event);
 }
 
@@ -1721,6 +1721,26 @@ static int perf_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+int perf_event_release_kernel(struct perf_event *event)
+{
+       struct perf_event_context *ctx = event->ctx;
+
+       WARN_ON_ONCE(ctx->parent_ctx);
+       mutex_lock(&ctx->mutex);
+       perf_event_remove_from_context(event);
+       mutex_unlock(&ctx->mutex);
+
+       mutex_lock(&event->owner->perf_event_mutex);
+       list_del_init(&event->owner_entry);
+       mutex_unlock(&event->owner->perf_event_mutex);
+       put_task_struct(event->owner);
+
+       free_event(event);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(perf_event_release_kernel);
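
perf_event_release_kernel() gives in-kernel counter owners a teardown
path that does not go through a struct file and perf_release(): it
detaches the event from its context under ctx->mutex, unlinks it from
the owner's list, and frees it. Minimal sketch of the assumed pairing
(the creation side is perf_event_create_kernel_counter(), added further
down):

	/* ev obtained earlier from perf_event_create_kernel_counter() */
	perf_event_release_kernel(ev);
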
+
 static int perf_event_read_size(struct perf_event *event)
 {
        int entry = sizeof(u64); /* value */
@@ -1746,7 +1766,7 @@ static int perf_event_read_size(struct perf_event *event)
        return size;
 }
 
-static u64 perf_event_read_value(struct perf_event *event)
+u64 perf_event_read_value(struct perf_event *event)
 {
        struct perf_event *child;
        u64 total = 0;
@@ -1757,31 +1777,17 @@ static u64 perf_event_read_value(struct perf_event *event)
 
        return total;
 }
-
-static int perf_event_read_entry(struct perf_event *event,
-                                  u64 read_format, char __user *buf)
-{
-       int n = 0, count = 0;
-       u64 values[2];
-
-       values[n++] = perf_event_read_value(event);
-       if (read_format & PERF_FORMAT_ID)
-               values[n++] = primary_event_id(event);
-
-       count = n * sizeof(u64);
-
-       if (copy_to_user(buf, values, count))
-               return -EFAULT;
-
-       return count;
-}
+EXPORT_SYMBOL_GPL(perf_event_read_value);
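
perf_event_read_value() loses its static qualifier and gets exported so
the same aggregation is available to in-kernel users: it returns the
event's own count plus the accumulated counts of its children. Minimal
sketch, assuming a counter owned by kernel code:

	u64 total = perf_event_read_value(ev);	/* own count + children */
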
 
 static int perf_event_read_group(struct perf_event *event,
                                   u64 read_format, char __user *buf)
 {
        struct perf_event *leader = event->group_leader, *sub;
-       int n = 0, size = 0, err = -EFAULT;
-       u64 values[3];
+       int n = 0, size = 0, ret = 0;
+       u64 values[5];
+       u64 count;
+
+       count = perf_event_read_value(leader);
 
        values[n++] = 1 + leader->nr_siblings;
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
@@ -1792,28 +1798,33 @@ static int perf_event_read_group(struct perf_event *event,
                values[n++] = leader->total_time_running +
                        atomic64_read(&leader->child_total_time_running);
        }
+       values[n++] = count;
+       if (read_format & PERF_FORMAT_ID)
+               values[n++] = primary_event_id(leader);
 
        size = n * sizeof(u64);
 
        if (copy_to_user(buf, values, size))
                return -EFAULT;
 
-       err = perf_event_read_entry(leader, read_format, buf + size);
-       if (err < 0)
-               return err;
-
-       size += err;
+       ret += size;
 
        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
-               err = perf_event_read_entry(sub, read_format,
-                               buf + size);
-               if (err < 0)
-                       return err;
+               n = 0;
+
+               values[n++] = perf_event_read_value(sub);
+               if (read_format & PERF_FORMAT_ID)
+                       values[n++] = primary_event_id(sub);
 
-               size += err;
+               size = n * sizeof(u64);
+
+       if (copy_to_user(buf + ret, values, size))
+                       return -EFAULT;
+
+               ret += size;
        }
 
-       return size;
+       return ret;
 }
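
With perf_event_read_entry() folded into perf_event_read_group(), the
whole group is emitted from a single values[5] scratch buffer and each
sibling record is copied out at the running offset ret. The resulting
read() layout for PERF_FORMAT_GROUP, written out with all optional
format bits set:

	u64 nr;			/* 1 + leader->nr_siblings        */
	u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	u64 value;		/* leader count                   */
	u64 id;			/* PERF_FORMAT_ID                 */
	struct {
		u64	value;
		u64	id;	/* if PERF_FORMAT_ID */
	} cnt[nr - 1];		/* one record per sibling */
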
 
 static int perf_event_read_one(struct perf_event *event,
@@ -1974,7 +1985,8 @@ unlock:
        return ret;
 }
 
-int perf_event_set_output(struct perf_event *event, int output_fd);
+static int perf_event_set_output(struct perf_event *event, int output_fd);
+static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
@@ -2002,6 +2014,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case PERF_EVENT_IOC_SET_OUTPUT:
                return perf_event_set_output(event, arg);
 
+       case PERF_EVENT_IOC_SET_FILTER:
+               return perf_event_set_filter(event, (void __user *)arg);
+
        default:
                return -ENOTTY;
        }
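
PERF_EVENT_IOC_SET_FILTER hands a user-supplied string to the
tracepoint filter code; perf_event_set_filter(), added further down,
rejects anything that is not a PERF_TYPE_TRACEPOINT event. Userspace
sketch, assuming perf_fd came from perf_event_open() on a tracepoint
event and a predicate in ftrace filter syntax:

	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0") < 0)
		perror("PERF_EVENT_IOC_SET_FILTER");
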
@@ -2666,20 +2681,21 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 static void perf_output_lock(struct perf_output_handle *handle)
 {
        struct perf_mmap_data *data = handle->data;
-       int cpu;
+       int cur, cpu = get_cpu();
 
        handle->locked = 0;
 
-       local_irq_save(handle->flags);
-       cpu = smp_processor_id();
-
-       if (in_nmi() && atomic_read(&data->lock) == cpu)
-               return;
+       for (;;) {
+               cur = atomic_cmpxchg(&data->lock, -1, cpu);
+               if (cur == -1) {
+                       handle->locked = 1;
+                       break;
+               }
+               if (cur == cpu)
+                       break;
 
-       while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
                cpu_relax();
-
-       handle->locked = 1;
+       }
 }
 
 static void perf_output_unlock(struct perf_output_handle *handle)
@@ -2725,7 +2741,7 @@ again:
        if (atomic_xchg(&data->wakeup, 0))
                perf_output_wakeup(handle);
 out:
-       local_irq_restore(handle->flags);
+       put_cpu();
 }
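
perf_output_lock() now takes the buffer lock with a cmpxchg on the
owning cpu id instead of disabling interrupts, and get_cpu()/put_cpu()
pin the cpu across the critical section. The invariant, as I read it
(recursion semantics assumed):

	/*
	 * data->lock == -1    unlocked
	 * data->lock == cpu   held by 'cpu'; a nested acquisition (e.g.
	 *                     from NMI context) on the same cpu sees
	 *                     cur == cpu, leaves handle->locked == 0, and
	 *                     the matching perf_output_unlock() then skips
	 *                     the actual release.
	 */
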
 
 void perf_output_copy(struct perf_output_handle *handle,
@@ -3236,15 +3252,10 @@ static void perf_event_task_ctx(struct perf_event_context *ctx,
 {
        struct perf_event *event;
 
-       if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-               return;
-
-       rcu_read_lock();
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (perf_event_task_match(event))
                        perf_event_task_output(event, task_event);
        }
-       rcu_read_unlock();
 }
 
 static void perf_event_task_event(struct perf_task_event *task_event)
@@ -3252,11 +3263,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx = task_event->task_ctx;
 
+       rcu_read_lock();
        cpuctx = &get_cpu_var(perf_cpu_context);
        perf_event_task_ctx(&cpuctx->ctx, task_event);
        put_cpu_var(perf_cpu_context);
 
-       rcu_read_lock();
        if (!ctx)
                ctx = rcu_dereference(task_event->task->perf_event_ctxp);
        if (ctx)
@@ -3348,15 +3359,10 @@ static void perf_event_comm_ctx(struct perf_event_context *ctx,
 {
        struct perf_event *event;
 
-       if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-               return;
-
-       rcu_read_lock();
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (perf_event_comm_match(event))
                        perf_event_comm_output(event, comm_event);
        }
-       rcu_read_unlock();
 }
 
 static void perf_event_comm_event(struct perf_comm_event *comm_event)
@@ -3375,11 +3381,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 
+       rcu_read_lock();
        cpuctx = &get_cpu_var(perf_cpu_context);
        perf_event_comm_ctx(&cpuctx->ctx, comm_event);
        put_cpu_var(perf_cpu_context);
 
-       rcu_read_lock();
        /*
         * doesn't really matter which of the child contexts the
        * events end up in.
@@ -3472,15 +3478,10 @@ static void perf_event_mmap_ctx(struct perf_event_context *ctx,
 {
        struct perf_event *event;
 
-       if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-               return;
-
-       rcu_read_lock();
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (perf_event_mmap_match(event, mmap_event))
                        perf_event_mmap_output(event, mmap_event);
        }
-       rcu_read_unlock();
 }
 
 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
@@ -3536,11 +3537,11 @@ got_name:
 
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
+       rcu_read_lock();
        cpuctx = &get_cpu_var(perf_cpu_context);
        perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
        put_cpu_var(perf_cpu_context);
 
-       rcu_read_lock();
        /*
         * doesn't really matter which of the child contexts the
        * events end up in.
@@ -3679,7 +3680,11 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
                        perf_event_disable(event);
        }
 
-       perf_event_output(event, nmi, data, regs);
+       if (event->overflow_handler)
+               event->overflow_handler(event, nmi, data, regs);
+       else
+               perf_event_output(event, nmi, data, regs);
+
        return ret;
 }
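
__perf_event_overflow() now diverts to event->overflow_handler when one
is installed, letting in-kernel users consume samples without the mmap
buffer; inherit_event() at the bottom of this diff propagates the
handler to child events. Hypothetical handler, with the signature taken
from the call above:

	static void my_overflow(struct perf_event *event, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs)
	{
		/* consume the sample in-kernel */
	}

	/* sketch: installed by the event's kernel-side owner */
	event->overflow_handler = my_overflow;
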
 
@@ -3724,16 +3729,16 @@ again:
        return nr;
 }
 
-static void perf_swevent_overflow(struct perf_event *event,
+static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
                                    int nmi, struct perf_sample_data *data,
                                    struct pt_regs *regs)
 {
        struct hw_perf_event *hwc = &event->hw;
        int throttle = 0;
-       u64 overflow;
 
        data->period = event->hw.last_period;
-       overflow = perf_swevent_set_period(event);
+       if (!overflow)
+               overflow = perf_swevent_set_period(event);
 
        if (hwc->interrupts == MAX_INTERRUPTS)
                return;
@@ -3766,14 +3771,19 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 
        atomic64_add(nr, &event->count);
 
+       if (!regs)
+               return;
+
        if (!hwc->sample_period)
                return;
 
-       if (!regs)
+       if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
+               return perf_swevent_overflow(event, 1, nmi, data, regs);
+
+       if (atomic64_add_negative(nr, &hwc->period_left))
                return;
 
-       if (!atomic64_add_negative(nr, &hwc->period_left))
-               perf_swevent_overflow(event, nmi, data, regs);
+       perf_swevent_overflow(event, 0, nmi, data, regs);
 }
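
perf_swevent_add() now bails out before any period accounting when regs
is NULL, and grows a fast path: for a non-freq event with
sample_period == 1, every increment is by construction one complete
period, so overflow is passed in as 1 and both the atomic64 period_left
arithmetic and perf_swevent_set_period() are skipped. In the general
case overflow is passed as 0, which tells perf_swevent_overflow() to
compute it itself:

	/*
	 * nr == 1 && sample_period == 1 && !attr.freq
	 *	-> perf_swevent_overflow(event, 1, ...)  overflow known: 1
	 * otherwise
	 *	-> perf_swevent_overflow(event, 0, ...)  period computed by
	 *						 perf_swevent_set_period()
	 */
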
 
 static int perf_swevent_is_counting(struct perf_event *event)
@@ -3806,9 +3816,14 @@ static int perf_swevent_is_counting(struct perf_event *event)
        return 1;
 }
 
+static int perf_tp_event_match(struct perf_event *event,
+                               struct perf_sample_data *data);
+
 static int perf_swevent_match(struct perf_event *event,
                                enum perf_type_id type,
-                               u32 event_id, struct pt_regs *regs)
+                               u32 event_id,
+                               struct perf_sample_data *data,
+                               struct pt_regs *regs)
 {
        if (!perf_swevent_is_counting(event))
                return 0;
@@ -3826,6 +3841,10 @@ static int perf_swevent_match(struct perf_event *event,
                        return 0;
        }
 
+       if (event->attr.type == PERF_TYPE_TRACEPOINT &&
+           !perf_tp_event_match(event, data))
+               return 0;
+
        return 1;
 }
 
@@ -3837,15 +3856,10 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
 {
        struct perf_event *event;
 
-       if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-               return;
-
-       rcu_read_lock();
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-               if (perf_swevent_match(event, type, event_id, regs))
+               if (perf_swevent_match(event, type, event_id, data, regs))
                        perf_swevent_add(event, nr, nmi, data, regs);
        }
-       rcu_read_unlock();
 }
 
 static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
@@ -3877,9 +3891,9 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
        (*recursion)++;
        barrier();
 
+       rcu_read_lock();
        perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
                                 nr, nmi, data, regs);
-       rcu_read_lock();
        /*
         * doesn't really matter which of the child contexts the
        * events end up in.
@@ -4108,6 +4122,7 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
+
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
                          int entry_size)
 {
@@ -4131,8 +4146,15 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
-extern int ftrace_profile_enable(int);
-extern void ftrace_profile_disable(int);
+static int perf_tp_event_match(struct perf_event *event,
+                               struct perf_sample_data *data)
+{
+       void *record = data->raw->data;
+
+       if (likely(!event->filter) || filter_match_preds(event->filter, record))
+               return 1;
+       return 0;
+}
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
@@ -4157,11 +4179,97 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 
        return &perf_ops_generic;
 }
+
+static int perf_event_set_filter(struct perf_event *event, void __user *arg)
+{
+       char *filter_str;
+       int ret;
+
+       if (event->attr.type != PERF_TYPE_TRACEPOINT)
+               return -EINVAL;
+
+       filter_str = strndup_user(arg, PAGE_SIZE);
+       if (IS_ERR(filter_str))
+               return PTR_ERR(filter_str);
+
+       ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
+
+       kfree(filter_str);
+       return ret;
+}
+
+static void perf_event_free_filter(struct perf_event *event)
+{
+       ftrace_profile_free_filter(event);
+}
+
 #else
+
+static int perf_tp_event_match(struct perf_event *event,
+                               struct perf_sample_data *data)
+{
+       return 1;
+}
+
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
 {
        return NULL;
 }
+
+static int perf_event_set_filter(struct perf_event *event, void __user *arg)
+{
+       return -ENOENT;
+}
+
+static void perf_event_free_filter(struct perf_event *event)
+{
+}
+
+#endif /* CONFIG_EVENT_PROFILE */
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+       release_bp_slot(event);
+}
+
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+       int err;
+       /*
+        * The breakpoint is already filled in if we didn't create the
+        * counter through the perf syscall.
+        * FIXME: manage to get triggered to NULL if it comes from syscalls
+        */
+       if (!bp->callback)
+               err = register_perf_hw_breakpoint(bp);
+       else
+               err = __register_perf_hw_breakpoint(bp);
+       if (err)
+               return ERR_PTR(err);
+
+       bp->destroy = bp_perf_event_destroy;
+
+       return &perf_ops_bp;
+}
+
+void perf_bp_event(struct perf_event *bp, void *regs)
+{
+       /* TODO */
+}
+#else
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+}
+
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+       return NULL;
+}
+
+void perf_bp_event(struct perf_event *bp, void *regs)
+{
+}
 #endif
 
 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -4208,6 +4316,8 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event)
        case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
        case PERF_COUNT_SW_CONTEXT_SWITCHES:
        case PERF_COUNT_SW_CPU_MIGRATIONS:
+       case PERF_COUNT_SW_ALIGNMENT_FAULTS:
+       case PERF_COUNT_SW_EMULATION_FAULTS:
                if (!event->parent) {
                        atomic_inc(&perf_swevent_enabled[event_id]);
                        event->destroy = sw_perf_event_destroy;
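
The two new software event ids only count when the architecture reports
the faults; a sketch of the assumed arch-side call, using this kernel's
perf_sw_event(event_id, nr, nmi, regs, addr) helper:

	/* e.g. from an alignment-fault handler (call site assumed) */
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
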
@@ -4228,6 +4338,7 @@ perf_event_alloc(struct perf_event_attr *attr,
                   struct perf_event_context *ctx,
                   struct perf_event *group_leader,
                   struct perf_event *parent_event,
+                  perf_callback_t callback,
                   gfp_t gfpflags)
 {
        const struct pmu *pmu;
@@ -4270,6 +4381,11 @@ perf_event_alloc(struct perf_event_attr *attr,
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
+       if (!callback && parent_event)
+               callback = parent_event->callback;
+
+       event->callback = callback;
+
        if (attr->disabled)
                event->state = PERF_EVENT_STATE_OFF;
 
@@ -4304,6 +4420,11 @@ perf_event_alloc(struct perf_event_attr *attr,
                pmu = tp_perf_event_init(event);
                break;
 
+       case PERF_TYPE_BREAKPOINT:
+               pmu = bp_perf_event_init(event);
+               break;
+
+
        default:
                break;
        }
@@ -4416,7 +4537,7 @@ err_size:
        goto out;
 }
 
-int perf_event_set_output(struct perf_event *event, int output_fd)
+static int perf_event_set_output(struct perf_event *event, int output_fd)
 {
        struct perf_event *output_event = NULL;
        struct file *output_file = NULL;
@@ -4546,7 +4667,7 @@ SYSCALL_DEFINE5(perf_event_open,
        }
 
        event = perf_event_alloc(&attr, cpu, ctx, group_leader,
-                                    NULL, GFP_KERNEL);
+                                    NULL, NULL, GFP_KERNEL);
        err = PTR_ERR(event);
        if (IS_ERR(event))
                goto err_put_context;
@@ -4594,6 +4715,58 @@ err_put_context:
        return err;
 }
 
+/**
+ * perf_event_create_kernel_counter
+ *
+ * @attr: attributes of the counter to create
+ * @cpu: cpu on which the counter is bound
+ * @pid: task to profile
+ */
+struct perf_event *
+perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+                                pid_t pid, perf_callback_t callback)
+{
+       struct perf_event *event;
+       struct perf_event_context *ctx;
+       int err;
+
+       /*
+        * Get the target context (task or percpu):
+        */
+
+       ctx = find_get_context(pid, cpu);
+       if (IS_ERR(ctx))
+               return NULL;
+
+       event = perf_event_alloc(attr, cpu, ctx, NULL,
+                                    NULL, callback, GFP_KERNEL);
+       err = PTR_ERR(event);
+       if (IS_ERR(event))
+               goto err_put_context;
+
+       event->filp = NULL;
+       WARN_ON_ONCE(ctx->parent_ctx);
+       mutex_lock(&ctx->mutex);
+       perf_install_in_context(ctx, event, cpu);
+       ++ctx->generation;
+       mutex_unlock(&ctx->mutex);
+
+       event->owner = current;
+       get_task_struct(current);
+       mutex_lock(&current->perf_event_mutex);
+       list_add_tail(&event->owner_entry, &current->perf_event_list);
+       mutex_unlock(&current->perf_event_mutex);
+
+       return event;
+
+err_put_context:
+       if (err < 0)
+               put_ctx(ctx);
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
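
perf_event_create_kernel_counter() mirrors the perf_event_open() path
for kernel-internal users: find the target context, allocate the event
with an optional callback, install it, and record current as owner.
Note that this version returns NULL rather than an ERR_PTR on failure.
Hypothetical usage sketch (attribute values are illustrative only):

	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_INSTRUCTIONS,
		.size	= sizeof(attr),
	};
	struct perf_event *ev;

	/* pid == -1 with a valid cpu requests a per-cpu counter */
	ev = perf_event_create_kernel_counter(&attr, 0, -1, NULL);
	if (!ev)	/* NULL, not ERR_PTR, in this version */
		return;

	/* later: perf_event_read_value(ev); perf_event_release_kernel(ev); */
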
+
 /*
 * inherit an event from parent task to child task:
  */
@@ -4619,7 +4792,7 @@ inherit_event(struct perf_event *parent_event,
        child_event = perf_event_alloc(&parent_event->attr,
                                           parent_event->cpu, child_ctx,
                                           group_leader, parent_event,
-                                          GFP_KERNEL);
+                                          NULL, GFP_KERNEL);
        if (IS_ERR(child_event))
                return child_event;
        get_ctx(child_ctx);
@@ -4637,6 +4810,8 @@ inherit_event(struct perf_event *parent_event,
        if (parent_event->attr.freq)
                child_event->hw.sample_period = parent_event->hw.sample_period;
 
+       child_event->overflow_handler = parent_event->overflow_handler;
+
        /*
         * Link it up in the child's context:
         */