2 * Performance events core code:
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9 * For licensing details see kernel-base/COPYING
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/idr.h>
17 #include <linux/file.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/sysfs.h>
22 #include <linux/dcache.h>
23 #include <linux/percpu.h>
24 #include <linux/ptrace.h>
25 #include <linux/reboot.h>
26 #include <linux/vmstat.h>
27 #include <linux/device.h>
28 #include <linux/export.h>
29 #include <linux/vmalloc.h>
30 #include <linux/hardirq.h>
31 #include <linux/rculist.h>
32 #include <linux/uaccess.h>
33 #include <linux/syscalls.h>
34 #include <linux/anon_inodes.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/perf_event.h>
37 #include <linux/ftrace_event.h>
38 #include <linux/hw_breakpoint.h>
42 #include <asm/irq_regs.h>
44 struct remote_function_call {
45 struct task_struct *p;
46 int (*func)(void *info);
51 static void remote_function(void *data)
53 struct remote_function_call *tfc = data;
54 struct task_struct *p = tfc->p;
58 if (task_cpu(p) != smp_processor_id() || !task_curr(p))
62 tfc->ret = tfc->func(tfc->info);
66 * task_function_call - call a function on the cpu on which a task runs
67 * @p: the task to evaluate
68 * @func: the function to be called
69 * @info: the function call argument
71 * Calls the function @func when the task is currently running. This might
72 * be on the current CPU, which just calls the function directly
74 * returns: @func return value, or
75 * -ESRCH - when the process isn't running
76 * -EAGAIN - when the process moved away
79 task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
81 struct remote_function_call data = {
85 .ret = -ESRCH, /* No such (running) process */
89 smp_call_function_single(task_cpu(p), remote_function, &data, 1);
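/*
 * For illustration, a caller wraps the work it wants done on the task's
 * CPU in a small callback and checks the return value; my_cb and my_arg
 * below are made-up names for this sketch:
 *
 *	static int my_cb(void *info)
 *	{
 *		struct my_arg *arg = info;
 *
 *		arg->cpu = smp_processor_id();
 *		return 0;
 *	}
 *
 *	err = task_function_call(p, my_cb, &arg);
 *	if (err == -ESRCH || err == -EAGAIN)
 *		... the task was not running, or moved away; retry or bail ...
 */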
95 * cpu_function_call - call a function on the cpu
96 * @func: the function to be called
97 * @info: the function call argument
99 * Calls the function @func on the remote cpu.
101 * returns: @func return value or -ENXIO when the cpu is offline
103 static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
105 struct remote_function_call data = {
109 .ret = -ENXIO, /* No such CPU */
112 smp_call_function_single(cpu, remote_function, &data, 1);
117 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
118 PERF_FLAG_FD_OUTPUT |\
119 PERF_FLAG_PID_CGROUP)
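/*
 * PERF_FLAG_PID_CGROUP turns the 'pid' argument of perf_event_open()
 * into a cgroup directory fd instead of a task pid (the mount path below
 * is only an example); in that mode a real CPU must be named:
 *
 *	int cfd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
 *	fd = syscall(__NR_perf_event_open, &attr, cfd, cpu, -1,
 *		     PERF_FLAG_PID_CGROUP);
 */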
122 EVENT_FLEXIBLE = 0x1,
124 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
128 * perf_sched_events : >0 events exist
129 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
131 struct jump_label_key perf_sched_events __read_mostly;
132 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
134 static atomic_t nr_mmap_events __read_mostly;
135 static atomic_t nr_comm_events __read_mostly;
136 static atomic_t nr_task_events __read_mostly;
138 static LIST_HEAD(pmus);
139 static DEFINE_MUTEX(pmus_lock);
140 static struct srcu_struct pmus_srcu;
143 * perf event paranoia level:
144 * -1 - not paranoid at all
145 * 0 - disallow raw tracepoint access for unpriv
146 * 1 - disallow cpu events for unpriv
147 * 2 - disallow kernel profiling for unpriv
149 int sysctl_perf_event_paranoid __read_mostly = 1;
151 /* Minimum for 512 kiB + 1 user control page */
152 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
155 * max perf event sample rate
157 #define DEFAULT_MAX_SAMPLE_RATE 100000
158 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
159 static int max_samples_per_tick __read_mostly =
160 DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
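/*
 * E.g. with the default sample rate of 100000 and HZ=1000 this allows
 * DIV_ROUND_UP(100000, 1000) = 100 samples per tick; with HZ=250 it
 * would be 400.
 */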
162 int perf_proc_update_handler(struct ctl_table *table, int write,
163 void __user *buffer, size_t *lenp,
166 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
171 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
176 static atomic64_t perf_event_id;
178 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
179 enum event_type_t event_type);
181 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
182 enum event_type_t event_type,
183 struct task_struct *task);
185 static void update_context_time(struct perf_event_context *ctx);
186 static u64 perf_event_time(struct perf_event *event);
188 void __weak perf_event_print_debug(void) { }
190 extern __weak const char *perf_pmu_name(void)
195 static inline u64 perf_clock(void)
197 return local_clock();
200 static inline struct perf_cpu_context *
201 __get_cpu_context(struct perf_event_context *ctx)
203 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
206 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
207 struct perf_event_context *ctx)
209 raw_spin_lock(&cpuctx->ctx.lock);
211 raw_spin_lock(&ctx->lock);
214 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
215 struct perf_event_context *ctx)
218 raw_spin_unlock(&ctx->lock);
219 raw_spin_unlock(&cpuctx->ctx.lock);
222 #ifdef CONFIG_CGROUP_PERF
225 * Must ensure cgroup is pinned (css_get) before calling
226 * this function. In other words, we cannot call this function
227 * if there is no cgroup event for the current CPU context.
229 static inline struct perf_cgroup *
230 perf_cgroup_from_task(struct task_struct *task)
232 return container_of(task_subsys_state(task, perf_subsys_id),
233 struct perf_cgroup, css);
237 perf_cgroup_match(struct perf_event *event)
239 struct perf_event_context *ctx = event->ctx;
240 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
242 return !event->cgrp || event->cgrp == cpuctx->cgrp;
245 static inline void perf_get_cgroup(struct perf_event *event)
247 css_get(&event->cgrp->css);
250 static inline void perf_put_cgroup(struct perf_event *event)
252 css_put(&event->cgrp->css);
255 static inline void perf_detach_cgroup(struct perf_event *event)
257 perf_put_cgroup(event);
261 static inline int is_cgroup_event(struct perf_event *event)
263 return event->cgrp != NULL;
266 static inline u64 perf_cgroup_event_time(struct perf_event *event)
268 struct perf_cgroup_info *t;
270 t = per_cpu_ptr(event->cgrp->info, event->cpu);
274 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
276 struct perf_cgroup_info *info;
281 info = this_cpu_ptr(cgrp->info);
283 info->time += now - info->timestamp;
284 info->timestamp = now;
287 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
289 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
291 __update_cgrp_time(cgrp_out);
294 static inline void update_cgrp_time_from_event(struct perf_event *event)
296 struct perf_cgroup *cgrp;
299 * ensure we access cgroup data only when needed and
300 * when we know the cgroup is pinned (css_get)
302 if (!is_cgroup_event(event))
305 cgrp = perf_cgroup_from_task(current);
307 * Do not update time when cgroup is not active
309 if (cgrp == event->cgrp)
310 __update_cgrp_time(event->cgrp);
314 perf_cgroup_set_timestamp(struct task_struct *task,
315 struct perf_event_context *ctx)
317 struct perf_cgroup *cgrp;
318 struct perf_cgroup_info *info;
321 * ctx->lock held by caller
322 * ensure we do not access cgroup data
323 * unless we have the cgroup pinned (css_get)
325 if (!task || !ctx->nr_cgroups)
328 cgrp = perf_cgroup_from_task(task);
329 info = this_cpu_ptr(cgrp->info);
330 info->timestamp = ctx->timestamp;
333 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
334 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
337 * reschedule events based on the cgroup constraint of task.
339 * mode SWOUT : schedule out everything
340 * mode SWIN : schedule in based on cgroup for next
342 void perf_cgroup_switch(struct task_struct *task, int mode)
344 struct perf_cpu_context *cpuctx;
349 * disable interrupts to avoid getting nr_cgroup
350 * changes via __perf_event_disable(). Also
353 local_irq_save(flags);
356 * we reschedule only in the presence of cgroup
357 * constrained events.
361 list_for_each_entry_rcu(pmu, &pmus, entry) {
362 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
365 * perf_cgroup_events says at least one
366 * context on this CPU has cgroup events.
368 * ctx->nr_cgroups reports the number of cgroup
369 * events for a context.
371 if (cpuctx->ctx.nr_cgroups > 0) {
372 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
373 perf_pmu_disable(cpuctx->ctx.pmu);
375 if (mode & PERF_CGROUP_SWOUT) {
376 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
378 * must not be done before ctxswout due
379 * to event_filter_match() in event_sched_out()
384 if (mode & PERF_CGROUP_SWIN) {
385 WARN_ON_ONCE(cpuctx->cgrp);
386 /* set cgrp before ctxsw in to
387 * allow event_filter_match() to not
388 * have to pass task around
390 cpuctx->cgrp = perf_cgroup_from_task(task);
391 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
393 perf_pmu_enable(cpuctx->ctx.pmu);
394 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
400 local_irq_restore(flags);
403 static inline void perf_cgroup_sched_out(struct task_struct *task,
404 struct task_struct *next)
406 struct perf_cgroup *cgrp1;
407 struct perf_cgroup *cgrp2 = NULL;
410 * we come here when we know perf_cgroup_events > 0
412 cgrp1 = perf_cgroup_from_task(task);
415 * next is NULL when called from perf_event_enable_on_exec()
416 * that will systematically cause a cgroup_switch()
419 cgrp2 = perf_cgroup_from_task(next);
422 * only schedule out current cgroup events if we know
423 * that we are switching to a different cgroup. Otherwise,
424 * do not touch the cgroup events.
427 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
430 static inline void perf_cgroup_sched_in(struct task_struct *prev,
431 struct task_struct *task)
433 struct perf_cgroup *cgrp1;
434 struct perf_cgroup *cgrp2 = NULL;
437 * we come here when we know perf_cgroup_events > 0
439 cgrp1 = perf_cgroup_from_task(task);
441 /* prev can never be NULL */
442 cgrp2 = perf_cgroup_from_task(prev);
445 * only need to schedule in cgroup events if we are changing
446 * cgroup during ctxsw. Cgroup events were not scheduled
447 * out during the previous ctxsw if that was not the case.
450 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
453 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
454 struct perf_event_attr *attr,
455 struct perf_event *group_leader)
457 struct perf_cgroup *cgrp;
458 struct cgroup_subsys_state *css;
460 int ret = 0, fput_needed;
462 file = fget_light(fd, &fput_needed);
466 css = cgroup_css_from_dir(file, perf_subsys_id);
472 cgrp = container_of(css, struct perf_cgroup, css);
475 /* must be done before we fput() the file */
476 perf_get_cgroup(event);
479 * all events in a group must monitor
480 * the same cgroup because a task belongs
481 * to only one perf cgroup at a time
483 if (group_leader && group_leader->cgrp != cgrp) {
484 perf_detach_cgroup(event);
488 fput_light(file, fput_needed);
493 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
495 struct perf_cgroup_info *t;
496 t = per_cpu_ptr(event->cgrp->info, event->cpu);
497 event->shadow_ctx_time = now - t->timestamp;
501 perf_cgroup_defer_enabled(struct perf_event *event)
504 * when the current task's perf cgroup does not match
505 * the event's, we need to remember to call the
506 * perf_cgroup_mark_enabled() function the first time a task with
507 * a matching perf cgroup is scheduled in.
509 if (is_cgroup_event(event) && !perf_cgroup_match(event))
510 event->cgrp_defer_enabled = 1;
514 perf_cgroup_mark_enabled(struct perf_event *event,
515 struct perf_event_context *ctx)
517 struct perf_event *sub;
518 u64 tstamp = perf_event_time(event);
520 if (!event->cgrp_defer_enabled)
523 event->cgrp_defer_enabled = 0;
525 event->tstamp_enabled = tstamp - event->total_time_enabled;
526 list_for_each_entry(sub, &event->sibling_list, group_entry) {
527 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
528 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
529 sub->cgrp_defer_enabled = 0;
533 #else /* !CONFIG_CGROUP_PERF */
536 perf_cgroup_match(struct perf_event *event)
541 static inline void perf_detach_cgroup(struct perf_event *event)
544 static inline int is_cgroup_event(struct perf_event *event)
549 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
554 static inline void update_cgrp_time_from_event(struct perf_event *event)
558 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
562 static inline void perf_cgroup_sched_out(struct task_struct *task,
563 struct task_struct *next)
567 static inline void perf_cgroup_sched_in(struct task_struct *prev,
568 struct task_struct *task)
572 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
573 struct perf_event_attr *attr,
574 struct perf_event *group_leader)
580 perf_cgroup_set_timestamp(struct task_struct *task,
581 struct perf_event_context *ctx)
586 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
591 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
595 static inline u64 perf_cgroup_event_time(struct perf_event *event)
601 perf_cgroup_defer_enabled(struct perf_event *event)
606 perf_cgroup_mark_enabled(struct perf_event *event,
607 struct perf_event_context *ctx)
612 void perf_pmu_disable(struct pmu *pmu)
614 int *count = this_cpu_ptr(pmu->pmu_disable_count);
616 pmu->pmu_disable(pmu);
619 void perf_pmu_enable(struct pmu *pmu)
621 int *count = this_cpu_ptr(pmu->pmu_disable_count);
623 pmu->pmu_enable(pmu);
626 static DEFINE_PER_CPU(struct list_head, rotation_list);
629 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
630 * because they're strictly cpu affine and rotate_start is called with IRQs
631 * disabled, while rotate_context is called from IRQ context.
633 static void perf_pmu_rotate_start(struct pmu *pmu)
635 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
636 struct list_head *head = &__get_cpu_var(rotation_list);
638 WARN_ON(!irqs_disabled());
640 if (list_empty(&cpuctx->rotation_list))
641 list_add(&cpuctx->rotation_list, head);
644 static void get_ctx(struct perf_event_context *ctx)
646 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
649 static void put_ctx(struct perf_event_context *ctx)
651 if (atomic_dec_and_test(&ctx->refcount)) {
653 put_ctx(ctx->parent_ctx);
655 put_task_struct(ctx->task);
656 kfree_rcu(ctx, rcu_head);
660 static void unclone_ctx(struct perf_event_context *ctx)
662 if (ctx->parent_ctx) {
663 put_ctx(ctx->parent_ctx);
664 ctx->parent_ctx = NULL;
668 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
671 * only top level events have the pid namespace they were created in
674 event = event->parent;
676 return task_tgid_nr_ns(p, event->ns);
679 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
682 * only top level events have the pid namespace they were created in
685 event = event->parent;
687 return task_pid_nr_ns(p, event->ns);
691 * If we inherit events we want to return the parent event id
694 static u64 primary_event_id(struct perf_event *event)
699 id = event->parent->id;
705 * Get the perf_event_context for a task and lock it.
706 * This has to cope with the fact that until it is locked,
707 * the context could get moved to another task.
709 static struct perf_event_context *
710 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
712 struct perf_event_context *ctx;
716 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
719 * If this context is a clone of another, it might
720 * get swapped for another underneath us by
721 * perf_event_task_sched_out, though the
722 * rcu_read_lock() protects us from any context
723 * getting freed. Lock the context and check if it
724 * got swapped before we could get the lock, and retry
725 * if so. If we locked the right context, then it
726 * can't get swapped on us any more.
728 raw_spin_lock_irqsave(&ctx->lock, *flags);
729 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
730 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
734 if (!atomic_inc_not_zero(&ctx->refcount)) {
735 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
744 * Get the context for a task and increment its pin_count so it
745 * can't get swapped to another task. This also increments its
746 * reference count so that the context can't get freed.
748 static struct perf_event_context *
749 perf_pin_task_context(struct task_struct *task, int ctxn)
751 struct perf_event_context *ctx;
754 ctx = perf_lock_task_context(task, ctxn, &flags);
757 raw_spin_unlock_irqrestore(&ctx->lock, flags);
762 static void perf_unpin_context(struct perf_event_context *ctx)
766 raw_spin_lock_irqsave(&ctx->lock, flags);
768 raw_spin_unlock_irqrestore(&ctx->lock, flags);
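/*
 * Sketch of the usual pin/unpin pattern around a task context (error
 * handling omitted; the reference is dropped unless the caller keeps it):
 *
 *	ctx = perf_pin_task_context(task, ctxn);
 *	if (ctx) {
 *		... use ctx; it cannot be swapped or freed here ...
 *		perf_unpin_context(ctx);
 *		put_ctx(ctx);
 *	}
 */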
772 * Update the record of the current time in a context.
774 static void update_context_time(struct perf_event_context *ctx)
776 u64 now = perf_clock();
778 ctx->time += now - ctx->timestamp;
779 ctx->timestamp = now;
782 static u64 perf_event_time(struct perf_event *event)
784 struct perf_event_context *ctx = event->ctx;
786 if (is_cgroup_event(event))
787 return perf_cgroup_event_time(event);
789 return ctx ? ctx->time : 0;
793 * Update the total_time_enabled and total_time_running fields for an event.
794 * The caller of this function needs to hold the ctx->lock.
796 static void update_event_times(struct perf_event *event)
798 struct perf_event_context *ctx = event->ctx;
801 if (event->state < PERF_EVENT_STATE_INACTIVE ||
802 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
805 * in cgroup mode, time_enabled represents
806 * the time the event was enabled AND active
807 * tasks were in the monitored cgroup. This is
808 * independent of the activity of the context as
809 * there may be a mix of cgroup and non-cgroup events.
811 * That is why we treat cgroup events differently
814 if (is_cgroup_event(event))
815 run_end = perf_event_time(event);
816 else if (ctx->is_active)
819 run_end = event->tstamp_stopped;
821 event->total_time_enabled = run_end - event->tstamp_enabled;
823 if (event->state == PERF_EVENT_STATE_INACTIVE)
824 run_end = event->tstamp_stopped;
826 run_end = perf_event_time(event);
828 event->total_time_running = run_end - event->tstamp_running;
833 * Update total_time_enabled and total_time_running for all events in a group.
835 static void update_group_times(struct perf_event *leader)
837 struct perf_event *event;
839 update_event_times(leader);
840 list_for_each_entry(event, &leader->sibling_list, group_entry)
841 update_event_times(event);
844 static struct list_head *
845 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
847 if (event->attr.pinned)
848 return &ctx->pinned_groups;
850 return &ctx->flexible_groups;
854 * Add an event to the lists for its context.
855 * Must be called with ctx->mutex and ctx->lock held.
858 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
860 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
861 event->attach_state |= PERF_ATTACH_CONTEXT;
864 * If we're a stand alone event or group leader, we go to the context
865 * list, group events are kept attached to the group so that
866 * perf_group_detach can, at all times, locate all siblings.
868 if (event->group_leader == event) {
869 struct list_head *list;
871 if (is_software_event(event))
872 event->group_flags |= PERF_GROUP_SOFTWARE;
874 list = ctx_group_list(event, ctx);
875 list_add_tail(&event->group_entry, list);
878 if (is_cgroup_event(event))
881 list_add_rcu(&event->event_entry, &ctx->event_list);
883 perf_pmu_rotate_start(ctx->pmu);
885 if (event->attr.inherit_stat)
890 * Called at perf_event creation and when events are attached/detached from a group.
893 static void perf_event__read_size(struct perf_event *event)
895 int entry = sizeof(u64); /* value */
899 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
902 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
905 if (event->attr.read_format & PERF_FORMAT_ID)
906 entry += sizeof(u64);
908 if (event->attr.read_format & PERF_FORMAT_GROUP) {
909 nr += event->group_leader->nr_siblings;
914 event->read_size = size;
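/*
 * Worked example, assuming the read_format layout from
 * include/linux/perf_event.h: a leader with two siblings and
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID gives
 * entry = 16 bytes (value + id), nr = 3, plus one u64 for the group's
 * nr field, i.e. read_size = 8 + 3 * 16 = 56 bytes.
 */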
917 static void perf_event__header_size(struct perf_event *event)
919 struct perf_sample_data *data;
920 u64 sample_type = event->attr.sample_type;
923 perf_event__read_size(event);
925 if (sample_type & PERF_SAMPLE_IP)
926 size += sizeof(data->ip);
928 if (sample_type & PERF_SAMPLE_ADDR)
929 size += sizeof(data->addr);
931 if (sample_type & PERF_SAMPLE_PERIOD)
932 size += sizeof(data->period);
934 if (sample_type & PERF_SAMPLE_READ)
935 size += event->read_size;
937 event->header_size = size;
940 static void perf_event__id_header_size(struct perf_event *event)
942 struct perf_sample_data *data;
943 u64 sample_type = event->attr.sample_type;
946 if (sample_type & PERF_SAMPLE_TID)
947 size += sizeof(data->tid_entry);
949 if (sample_type & PERF_SAMPLE_TIME)
950 size += sizeof(data->time);
952 if (sample_type & PERF_SAMPLE_ID)
953 size += sizeof(data->id);
955 if (sample_type & PERF_SAMPLE_STREAM_ID)
956 size += sizeof(data->stream_id);
958 if (sample_type & PERF_SAMPLE_CPU)
959 size += sizeof(data->cpu_entry);
961 event->id_header_size = size;
964 static void perf_group_attach(struct perf_event *event)
966 struct perf_event *group_leader = event->group_leader, *pos;
969 * We can have double attach due to group movement in perf_event_open.
971 if (event->attach_state & PERF_ATTACH_GROUP)
974 event->attach_state |= PERF_ATTACH_GROUP;
976 if (group_leader == event)
979 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
980 !is_software_event(event))
981 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
983 list_add_tail(&event->group_entry, &group_leader->sibling_list);
984 group_leader->nr_siblings++;
986 perf_event__header_size(group_leader);
988 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
989 perf_event__header_size(pos);
993 * Remove an event from the lists for its context.
994 * Must be called with ctx->mutex and ctx->lock held.
997 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
999 struct perf_cpu_context *cpuctx;
1001 * We can have double detach due to exit/hot-unplug + close.
1003 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1006 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1008 if (is_cgroup_event(event)) {
1010 cpuctx = __get_cpu_context(ctx);
1012 * if there are no more cgroup events
1013 * then clear cgrp to avoid stale pointer
1014 * in update_cgrp_time_from_cpuctx()
1016 if (!ctx->nr_cgroups)
1017 cpuctx->cgrp = NULL;
1021 if (event->attr.inherit_stat)
1024 list_del_rcu(&event->event_entry);
1026 if (event->group_leader == event)
1027 list_del_init(&event->group_entry);
1029 update_group_times(event);
1032 * If event was in error state, then keep it
1033 * that way, otherwise bogus counts will be
1034 * returned on read(). The only way to get out
1035 * of error state is by explicit re-enabling
1038 if (event->state > PERF_EVENT_STATE_OFF)
1039 event->state = PERF_EVENT_STATE_OFF;
1042 static void perf_group_detach(struct perf_event *event)
1044 struct perf_event *sibling, *tmp;
1045 struct list_head *list = NULL;
1048 * We can have double detach due to exit/hot-unplug + close.
1050 if (!(event->attach_state & PERF_ATTACH_GROUP))
1053 event->attach_state &= ~PERF_ATTACH_GROUP;
1056 * If this is a sibling, remove it from its group.
1058 if (event->group_leader != event) {
1059 list_del_init(&event->group_entry);
1060 event->group_leader->nr_siblings--;
1064 if (!list_empty(&event->group_entry))
1065 list = &event->group_entry;
1068 * If this was a group event with sibling events then
1069 * upgrade the siblings to singleton events by adding them
1070 * to whatever list we are on.
1072 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1074 list_move_tail(&sibling->group_entry, list);
1075 sibling->group_leader = sibling;
1077 /* Inherit group flags from the previous leader */
1078 sibling->group_flags = event->group_flags;
1082 perf_event__header_size(event->group_leader);
1084 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1085 perf_event__header_size(tmp);
1089 event_filter_match(struct perf_event *event)
1091 return (event->cpu == -1 || event->cpu == smp_processor_id())
1092 && perf_cgroup_match(event);
1096 event_sched_out(struct perf_event *event,
1097 struct perf_cpu_context *cpuctx,
1098 struct perf_event_context *ctx)
1100 u64 tstamp = perf_event_time(event);
1103 * An event which could not be activated because of
1104 * filter mismatch still needs to have its timings
1105 * maintained, otherwise bogus information is returned
1106 * via read() for time_enabled, time_running:
1108 if (event->state == PERF_EVENT_STATE_INACTIVE
1109 && !event_filter_match(event)) {
1110 delta = tstamp - event->tstamp_stopped;
1111 event->tstamp_running += delta;
1112 event->tstamp_stopped = tstamp;
1115 if (event->state != PERF_EVENT_STATE_ACTIVE)
1118 event->state = PERF_EVENT_STATE_INACTIVE;
1119 if (event->pending_disable) {
1120 event->pending_disable = 0;
1121 event->state = PERF_EVENT_STATE_OFF;
1123 event->tstamp_stopped = tstamp;
1124 event->pmu->del(event, 0);
1127 if (!is_software_event(event))
1128 cpuctx->active_oncpu--;
1130 if (event->attr.exclusive || !cpuctx->active_oncpu)
1131 cpuctx->exclusive = 0;
1135 group_sched_out(struct perf_event *group_event,
1136 struct perf_cpu_context *cpuctx,
1137 struct perf_event_context *ctx)
1139 struct perf_event *event;
1140 int state = group_event->state;
1142 event_sched_out(group_event, cpuctx, ctx);
1145 * Schedule out siblings (if any):
1147 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1148 event_sched_out(event, cpuctx, ctx);
1150 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1151 cpuctx->exclusive = 0;
1155 * Cross CPU call to remove a performance event
1157 * We disable the event on the hardware level first. After that we
1158 * remove it from the context list.
1160 static int __perf_remove_from_context(void *info)
1162 struct perf_event *event = info;
1163 struct perf_event_context *ctx = event->ctx;
1164 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1166 raw_spin_lock(&ctx->lock);
1167 event_sched_out(event, cpuctx, ctx);
1168 list_del_event(event, ctx);
1169 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1171 cpuctx->task_ctx = NULL;
1173 raw_spin_unlock(&ctx->lock);
1180 * Remove the event from a task's (or a CPU's) list of events.
1182 * CPU events are removed with an smp call. For task events we only
1183 * call when the task is on a CPU.
1185 * If event->ctx is a cloned context, callers must make sure that
1186 * every task struct that event->ctx->task could possibly point to
1187 * remains valid. This is OK when called from perf_release since
1188 * that only calls us on the top-level context, which can't be a clone.
1189 * When called from perf_event_exit_task, it's OK because the
1190 * context has been detached from its task.
1192 static void perf_remove_from_context(struct perf_event *event)
1194 struct perf_event_context *ctx = event->ctx;
1195 struct task_struct *task = ctx->task;
1197 lockdep_assert_held(&ctx->mutex);
1201 * Per cpu events are removed via an smp call and
1202 * the removal is always successful.
1204 cpu_function_call(event->cpu, __perf_remove_from_context, event);
1209 if (!task_function_call(task, __perf_remove_from_context, event))
1212 raw_spin_lock_irq(&ctx->lock);
1214 * If we failed to find a running task, but find the context active now
1215 * that we've acquired the ctx->lock, retry.
1217 if (ctx->is_active) {
1218 raw_spin_unlock_irq(&ctx->lock);
1223 * Since the task isn't running, it's safe to remove the event; our
1224 * holding the ctx->lock ensures the task won't get scheduled in.
1226 list_del_event(event, ctx);
1227 raw_spin_unlock_irq(&ctx->lock);
1231 * Cross CPU call to disable a performance event
1233 static int __perf_event_disable(void *info)
1235 struct perf_event *event = info;
1236 struct perf_event_context *ctx = event->ctx;
1237 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1240 * If this is a per-task event, we need to check whether this
1241 * event's task is the current task on this cpu.
1243 * Can trigger due to concurrent perf_event_context_sched_out()
1244 * flipping contexts around.
1246 if (ctx->task && cpuctx->task_ctx != ctx)
1249 raw_spin_lock(&ctx->lock);
1252 * If the event is on, turn it off.
1253 * If it is in error state, leave it in error state.
1255 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1256 update_context_time(ctx);
1257 update_cgrp_time_from_event(event);
1258 update_group_times(event);
1259 if (event == event->group_leader)
1260 group_sched_out(event, cpuctx, ctx);
1262 event_sched_out(event, cpuctx, ctx);
1263 event->state = PERF_EVENT_STATE_OFF;
1266 raw_spin_unlock(&ctx->lock);
1274 * If event->ctx is a cloned context, callers must make sure that
1275 * every task struct that event->ctx->task could possibly point to
1276 * remains valid. This condition is satisfied when called through
1277 * perf_event_for_each_child or perf_event_for_each because they
1278 * hold the top-level event's child_mutex, so any descendant that
1279 * goes to exit will block in sync_child_event.
1280 * When called from perf_pending_event it's OK because event->ctx
1281 * is the current context on this CPU and preemption is disabled,
1282 * hence we can't get into perf_event_task_sched_out for this context.
1284 void perf_event_disable(struct perf_event *event)
1286 struct perf_event_context *ctx = event->ctx;
1287 struct task_struct *task = ctx->task;
1291 * Disable the event on the cpu that it's on
1293 cpu_function_call(event->cpu, __perf_event_disable, event);
1298 if (!task_function_call(task, __perf_event_disable, event))
1301 raw_spin_lock_irq(&ctx->lock);
1303 * If the event is still active, we need to retry the cross-call.
1305 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1306 raw_spin_unlock_irq(&ctx->lock);
1308 * Reload the task pointer, it might have been changed by
1309 * a concurrent perf_event_context_sched_out().
1316 * Since we have the lock this context can't be scheduled
1317 * in, so we can change the state safely.
1319 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1320 update_group_times(event);
1321 event->state = PERF_EVENT_STATE_OFF;
1323 raw_spin_unlock_irq(&ctx->lock);
1326 static void perf_set_shadow_time(struct perf_event *event,
1327 struct perf_event_context *ctx,
1331 * use the correct time source for the time snapshot
1333 * We could get by without this by leveraging the
1334 * fact that to get to this function, the caller
1335 * has most likely already called update_context_time()
1336 * and update_cgrp_time_xx() and thus both timestamps
1337 * are identical (or very close). Given that tstamp is
1338 * already adjusted for cgroup, we could say that:
1339 * tstamp - ctx->timestamp
1340 * is equivalent to
1341 * tstamp - cgrp->timestamp.
1343 * Then, in perf_output_read(), the calculation would
1344 * work with no changes because:
1345 * - event is guaranteed scheduled in
1346 * - no scheduled out in between
1347 * - thus the timestamp would be the same
1349 * But this is a bit hairy.
1351 * So instead, we have an explicit cgroup call to remain
1352 * within the time source all along. We believe it
1353 * is cleaner and simpler to understand.
1355 if (is_cgroup_event(event))
1356 perf_cgroup_set_shadow_time(event, tstamp);
1358 event->shadow_ctx_time = tstamp - ctx->timestamp;
1361 #define MAX_INTERRUPTS (~0ULL)
1363 static void perf_log_throttle(struct perf_event *event, int enable);
1366 event_sched_in(struct perf_event *event,
1367 struct perf_cpu_context *cpuctx,
1368 struct perf_event_context *ctx)
1370 u64 tstamp = perf_event_time(event);
1372 if (event->state <= PERF_EVENT_STATE_OFF)
1375 event->state = PERF_EVENT_STATE_ACTIVE;
1376 event->oncpu = smp_processor_id();
1379 * Unthrottle events, since we just scheduled in we might have missed
1380 * several ticks already; also, for a heavily scheduling task there is
1381 * little guarantee it'll get a tick in a timely manner.
1383 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1384 perf_log_throttle(event, 1);
1385 event->hw.interrupts = 0;
1389 * The new state must be visible before we turn it on in the hardware:
1393 if (event->pmu->add(event, PERF_EF_START)) {
1394 event->state = PERF_EVENT_STATE_INACTIVE;
1399 event->tstamp_running += tstamp - event->tstamp_stopped;
1401 perf_set_shadow_time(event, ctx, tstamp);
1403 if (!is_software_event(event))
1404 cpuctx->active_oncpu++;
1407 if (event->attr.exclusive)
1408 cpuctx->exclusive = 1;
1414 group_sched_in(struct perf_event *group_event,
1415 struct perf_cpu_context *cpuctx,
1416 struct perf_event_context *ctx)
1418 struct perf_event *event, *partial_group = NULL;
1419 struct pmu *pmu = group_event->pmu;
1420 u64 now = ctx->time;
1421 bool simulate = false;
1423 if (group_event->state == PERF_EVENT_STATE_OFF)
1426 pmu->start_txn(pmu);
1428 if (event_sched_in(group_event, cpuctx, ctx)) {
1429 pmu->cancel_txn(pmu);
1434 * Schedule in siblings as one group (if any):
1436 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1437 if (event_sched_in(event, cpuctx, ctx)) {
1438 partial_group = event;
1443 if (!pmu->commit_txn(pmu))
1448 * Groups can be scheduled in as one unit only, so undo any
1449 * partial group before returning:
1450 * The events up to the failed event are scheduled out normally,
1451 * tstamp_stopped will be updated.
1453 * The failed events and the remaining siblings need to have
1454 * their timings updated as if they had gone through event_sched_in()
1455 * and event_sched_out(). This is required to get consistent timings
1456 * across the group. This also takes care of the case where the group
1457 * could never be scheduled by ensuring tstamp_stopped is set to mark
1458 * the time the event was actually stopped, such that time delta
1459 * calculation in update_event_times() is correct.
1461 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1462 if (event == partial_group)
1466 event->tstamp_running += now - event->tstamp_stopped;
1467 event->tstamp_stopped = now;
1469 event_sched_out(event, cpuctx, ctx);
1472 event_sched_out(group_event, cpuctx, ctx);
1474 pmu->cancel_txn(pmu);
1480 * Work out whether we can put this event group on the CPU now.
1482 static int group_can_go_on(struct perf_event *event,
1483 struct perf_cpu_context *cpuctx,
1487 * Groups consisting entirely of software events can always go on.
1489 if (event->group_flags & PERF_GROUP_SOFTWARE)
1492 * If an exclusive group is already on, no other hardware groups can go on.
1495 if (cpuctx->exclusive)
1498 * If this group is exclusive and there are already
1499 * events on the CPU, it can't go on.
1501 if (event->attr.exclusive && cpuctx->active_oncpu)
1504 * Otherwise, try to add it if all previous groups were able to go on.
1510 static void add_event_to_ctx(struct perf_event *event,
1511 struct perf_event_context *ctx)
1513 u64 tstamp = perf_event_time(event);
1515 list_add_event(event, ctx);
1516 perf_group_attach(event);
1517 event->tstamp_enabled = tstamp;
1518 event->tstamp_running = tstamp;
1519 event->tstamp_stopped = tstamp;
1522 static void task_ctx_sched_out(struct perf_event_context *ctx);
1524 ctx_sched_in(struct perf_event_context *ctx,
1525 struct perf_cpu_context *cpuctx,
1526 enum event_type_t event_type,
1527 struct task_struct *task);
1529 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1530 struct perf_event_context *ctx,
1531 struct task_struct *task)
1533 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1535 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1536 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1538 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1542 * Cross CPU call to install and enable a performance event
1544 * Must be called with ctx->mutex held
1546 static int __perf_install_in_context(void *info)
1548 struct perf_event *event = info;
1549 struct perf_event_context *ctx = event->ctx;
1550 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1551 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1552 struct task_struct *task = current;
1554 perf_ctx_lock(cpuctx, task_ctx);
1555 perf_pmu_disable(cpuctx->ctx.pmu);
1558 * If there was an active task_ctx schedule it out.
1561 task_ctx_sched_out(task_ctx);
1564 * If the context we're installing events in is not the
1565 * active task_ctx, flip them.
1567 if (ctx->task && task_ctx != ctx) {
1569 raw_spin_unlock(&task_ctx->lock);
1570 raw_spin_lock(&ctx->lock);
1575 cpuctx->task_ctx = task_ctx;
1576 task = task_ctx->task;
1579 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
1581 update_context_time(ctx);
1583 * update cgrp time only if current cgrp
1584 * matches event->cgrp. Must be done before
1585 * calling add_event_to_ctx()
1587 update_cgrp_time_from_event(event);
1589 add_event_to_ctx(event, ctx);
1592 * Schedule everything back in
1594 perf_event_sched_in(cpuctx, task_ctx, task);
1596 perf_pmu_enable(cpuctx->ctx.pmu);
1597 perf_ctx_unlock(cpuctx, task_ctx);
1603 * Attach a performance event to a context
1605 * First we add the event to the list with the hardware enable bit
1606 * in event->hw_config cleared.
1608 * If the event is attached to a task which is on a CPU we use an smp
1609 * call to enable it in the task context. The task might have been
1610 * scheduled away, but we check this in the smp call again.
1613 perf_install_in_context(struct perf_event_context *ctx,
1614 struct perf_event *event,
1617 struct task_struct *task = ctx->task;
1619 lockdep_assert_held(&ctx->mutex);
1625 * Per cpu events are installed via an smp call and
1626 * the install is always successful.
1628 cpu_function_call(cpu, __perf_install_in_context, event);
1633 if (!task_function_call(task, __perf_install_in_context, event))
1636 raw_spin_lock_irq(&ctx->lock);
1638 * If we failed to find a running task, but find the context active now
1639 * that we've acquired the ctx->lock, retry.
1641 if (ctx->is_active) {
1642 raw_spin_unlock_irq(&ctx->lock);
1647 * Since the task isn't running, it's safe to add the event; our holding
1648 * the ctx->lock ensures the task won't get scheduled in.
1650 add_event_to_ctx(event, ctx);
1651 raw_spin_unlock_irq(&ctx->lock);
1655 * Put an event into inactive state and update time fields.
1656 * Enabling the leader of a group effectively enables all
1657 * the group members that aren't explicitly disabled, so we
1658 * have to update their ->tstamp_enabled also.
1659 * Note: this works for group members as well as group leaders
1660 * since the non-leader members' sibling_lists will be empty.
1662 static void __perf_event_mark_enabled(struct perf_event *event,
1663 struct perf_event_context *ctx)
1665 struct perf_event *sub;
1666 u64 tstamp = perf_event_time(event);
1668 event->state = PERF_EVENT_STATE_INACTIVE;
1669 event->tstamp_enabled = tstamp - event->total_time_enabled;
1670 list_for_each_entry(sub, &event->sibling_list, group_entry) {
1671 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1672 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1677 * Cross CPU call to enable a performance event
1679 static int __perf_event_enable(void *info)
1681 struct perf_event *event = info;
1682 struct perf_event_context *ctx = event->ctx;
1683 struct perf_event *leader = event->group_leader;
1684 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1687 if (WARN_ON_ONCE(!ctx->is_active))
1690 raw_spin_lock(&ctx->lock);
1691 update_context_time(ctx);
1693 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1697 * set current task's cgroup time reference point
1699 perf_cgroup_set_timestamp(current, ctx);
1701 __perf_event_mark_enabled(event, ctx);
1703 if (!event_filter_match(event)) {
1704 if (is_cgroup_event(event))
1705 perf_cgroup_defer_enabled(event);
1710 * If the event is in a group and isn't the group leader,
1711 * then don't put it on unless the group is on.
1713 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
1716 if (!group_can_go_on(event, cpuctx, 1)) {
1719 if (event == leader)
1720 err = group_sched_in(event, cpuctx, ctx);
1722 err = event_sched_in(event, cpuctx, ctx);
1727 * If this event can't go on and it's part of a
1728 * group, then the whole group has to come off.
1730 if (leader != event)
1731 group_sched_out(leader, cpuctx, ctx);
1732 if (leader->attr.pinned) {
1733 update_group_times(leader);
1734 leader->state = PERF_EVENT_STATE_ERROR;
1739 raw_spin_unlock(&ctx->lock);
1747 * If event->ctx is a cloned context, callers must make sure that
1748 * every task struct that event->ctx->task could possibly point to
1749 * remains valid. This condition is satisfied when called through
1750 * perf_event_for_each_child or perf_event_for_each as described
1751 * for perf_event_disable.
1753 void perf_event_enable(struct perf_event *event)
1755 struct perf_event_context *ctx = event->ctx;
1756 struct task_struct *task = ctx->task;
1760 * Enable the event on the cpu that it's on
1762 cpu_function_call(event->cpu, __perf_event_enable, event);
1766 raw_spin_lock_irq(&ctx->lock);
1767 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1771 * If the event is in error state, clear that first.
1772 * That way, if we see the event in error state below, we
1773 * know that it has gone back into error state, as distinct
1774 * from the task having been scheduled away before the
1775 * cross-call arrived.
1777 if (event->state == PERF_EVENT_STATE_ERROR)
1778 event->state = PERF_EVENT_STATE_OFF;
1781 if (!ctx->is_active) {
1782 __perf_event_mark_enabled(event, ctx);
1786 raw_spin_unlock_irq(&ctx->lock);
1788 if (!task_function_call(task, __perf_event_enable, event))
1791 raw_spin_lock_irq(&ctx->lock);
1794 * If the context is active and the event is still off,
1795 * we need to retry the cross-call.
1797 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
1799 * task could have been flipped by a concurrent
1800 * perf_event_context_sched_out()
1807 raw_spin_unlock_irq(&ctx->lock);
1810 int perf_event_refresh(struct perf_event *event, int refresh)
1813 * not supported on inherited events
1815 if (event->attr.inherit || !is_sampling_event(event))
1818 atomic_add(refresh, &event->event_limit);
1819 perf_event_enable(event);
1823 EXPORT_SYMBOL_GPL(perf_event_refresh);
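/*
 * Userspace normally reaches this via the PERF_EVENT_IOC_REFRESH ioctl,
 * e.g. (sketch):
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);
 *
 * which roughly allows one more overflow before the event is disabled
 * again.
 */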
1825 static void ctx_sched_out(struct perf_event_context *ctx,
1826 struct perf_cpu_context *cpuctx,
1827 enum event_type_t event_type)
1829 struct perf_event *event;
1830 int is_active = ctx->is_active;
1832 ctx->is_active &= ~event_type;
1833 if (likely(!ctx->nr_events))
1836 update_context_time(ctx);
1837 update_cgrp_time_from_cpuctx(cpuctx);
1838 if (!ctx->nr_active)
1841 perf_pmu_disable(ctx->pmu);
1842 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
1843 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1844 group_sched_out(event, cpuctx, ctx);
1847 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
1848 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1849 group_sched_out(event, cpuctx, ctx);
1851 perf_pmu_enable(ctx->pmu);
1855 * Test whether two contexts are equivalent, i.e. whether they
1856 * have both been cloned from the same version of the same context
1857 * and they both have the same number of enabled events.
1858 * If the number of enabled events is the same, then the set
1859 * of enabled events should be the same, because these are both
1860 * inherited contexts, therefore we can't access individual events
1861 * in them directly with an fd; we can only enable/disable all
1862 * events via prctl, or enable/disable all events in a family
1863 * via ioctl, which will have the same effect on both contexts.
1865 static int context_equiv(struct perf_event_context *ctx1,
1866 struct perf_event_context *ctx2)
1868 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1869 && ctx1->parent_gen == ctx2->parent_gen
1870 && !ctx1->pin_count && !ctx2->pin_count;
1873 static void __perf_event_sync_stat(struct perf_event *event,
1874 struct perf_event *next_event)
1878 if (!event->attr.inherit_stat)
1882 * Update the event value; we cannot use perf_event_read()
1883 * because we're in the middle of a context switch and have IRQs
1884 * disabled, which upsets smp_call_function_single(), however
1885 * we know the event must be on the current CPU, therefore we
1886 * don't need to use it.
1888 switch (event->state) {
1889 case PERF_EVENT_STATE_ACTIVE:
1890 event->pmu->read(event);
1893 case PERF_EVENT_STATE_INACTIVE:
1894 update_event_times(event);
1902 * In order to keep per-task stats reliable we need to flip the event
1903 * values when we flip the contexts.
1905 value = local64_read(&next_event->count);
1906 value = local64_xchg(&event->count, value);
1907 local64_set(&next_event->count, value);
1909 swap(event->total_time_enabled, next_event->total_time_enabled);
1910 swap(event->total_time_running, next_event->total_time_running);
1913 * Since we swizzled the values, update the user visible data too.
1915 perf_event_update_userpage(event);
1916 perf_event_update_userpage(next_event);
1919 #define list_next_entry(pos, member) \
1920 list_entry(pos->member.next, typeof(*pos), member)
1922 static void perf_event_sync_stat(struct perf_event_context *ctx,
1923 struct perf_event_context *next_ctx)
1925 struct perf_event *event, *next_event;
1930 update_context_time(ctx);
1932 event = list_first_entry(&ctx->event_list,
1933 struct perf_event, event_entry);
1935 next_event = list_first_entry(&next_ctx->event_list,
1936 struct perf_event, event_entry);
1938 while (&event->event_entry != &ctx->event_list &&
1939 &next_event->event_entry != &next_ctx->event_list) {
1941 __perf_event_sync_stat(event, next_event);
1943 event = list_next_entry(event, event_entry);
1944 next_event = list_next_entry(next_event, event_entry);
1948 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1949 struct task_struct *next)
1951 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
1952 struct perf_event_context *next_ctx;
1953 struct perf_event_context *parent;
1954 struct perf_cpu_context *cpuctx;
1960 cpuctx = __get_cpu_context(ctx);
1961 if (!cpuctx->task_ctx)
1965 parent = rcu_dereference(ctx->parent_ctx);
1966 next_ctx = next->perf_event_ctxp[ctxn];
1967 if (parent && next_ctx &&
1968 rcu_dereference(next_ctx->parent_ctx) == parent) {
1970 * Looks like the two contexts are clones, so we might be
1971 * able to optimize the context switch. We lock both
1972 * contexts and check that they are clones under the
1973 * lock (including re-checking that neither has been
1974 * uncloned in the meantime). It doesn't matter which
1975 * order we take the locks because no other cpu could
1976 * be trying to lock both of these tasks.
1978 raw_spin_lock(&ctx->lock);
1979 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1980 if (context_equiv(ctx, next_ctx)) {
1982 * XXX do we need a memory barrier of sorts
1983 * wrt to rcu_dereference() of perf_event_ctxp
1985 task->perf_event_ctxp[ctxn] = next_ctx;
1986 next->perf_event_ctxp[ctxn] = ctx;
1988 next_ctx->task = task;
1991 perf_event_sync_stat(ctx, next_ctx);
1993 raw_spin_unlock(&next_ctx->lock);
1994 raw_spin_unlock(&ctx->lock);
1999 raw_spin_lock(&ctx->lock);
2000 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2001 cpuctx->task_ctx = NULL;
2002 raw_spin_unlock(&ctx->lock);
2006 #define for_each_task_context_nr(ctxn) \
2007 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2010 * Called from scheduler to remove the events of the current task,
2011 * with interrupts disabled.
2013 * We stop each event and update the event value in event->count.
2015 * This does not protect us against NMI, but disable()
2016 * sets the disabled bit in the control field of event _before_
2017 * accessing the event control register. If an NMI hits, then it will
2018 * not restart the event.
2020 void __perf_event_task_sched_out(struct task_struct *task,
2021 struct task_struct *next)
2025 for_each_task_context_nr(ctxn)
2026 perf_event_context_sched_out(task, ctxn, next);
2029 * if cgroup events exist on this CPU, then we need
2030 * to check if we have to switch out PMU state.
2031 * cgroup events are in system-wide mode only
2033 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2034 perf_cgroup_sched_out(task, next);
2037 static void task_ctx_sched_out(struct perf_event_context *ctx)
2039 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2041 if (!cpuctx->task_ctx)
2044 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2047 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2048 cpuctx->task_ctx = NULL;
2052 * Called with IRQs disabled
2054 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2055 enum event_type_t event_type)
2057 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2061 ctx_pinned_sched_in(struct perf_event_context *ctx,
2062 struct perf_cpu_context *cpuctx)
2064 struct perf_event *event;
2066 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2067 if (event->state <= PERF_EVENT_STATE_OFF)
2069 if (!event_filter_match(event))
2072 /* may need to reset tstamp_enabled */
2073 if (is_cgroup_event(event))
2074 perf_cgroup_mark_enabled(event, ctx);
2076 if (group_can_go_on(event, cpuctx, 1))
2077 group_sched_in(event, cpuctx, ctx);
2080 * If this pinned group hasn't been scheduled,
2081 * put it in error state.
2083 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2084 update_group_times(event);
2085 event->state = PERF_EVENT_STATE_ERROR;
2091 ctx_flexible_sched_in(struct perf_event_context *ctx,
2092 struct perf_cpu_context *cpuctx)
2094 struct perf_event *event;
2097 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2098 /* Ignore events in OFF or ERROR state */
2099 if (event->state <= PERF_EVENT_STATE_OFF)
2102 * Listen to the 'cpu' scheduling filter constraint
2105 if (!event_filter_match(event))
2108 /* may need to reset tstamp_enabled */
2109 if (is_cgroup_event(event))
2110 perf_cgroup_mark_enabled(event, ctx);
2112 if (group_can_go_on(event, cpuctx, can_add_hw)) {
2113 if (group_sched_in(event, cpuctx, ctx))
2120 ctx_sched_in(struct perf_event_context *ctx,
2121 struct perf_cpu_context *cpuctx,
2122 enum event_type_t event_type,
2123 struct task_struct *task)
2126 int is_active = ctx->is_active;
2128 ctx->is_active |= event_type;
2129 if (likely(!ctx->nr_events))
2133 ctx->timestamp = now;
2134 perf_cgroup_set_timestamp(task, ctx);
2136 * First go through the list and put on any pinned groups
2137 * in order to give them the best chance of going on.
2139 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2140 ctx_pinned_sched_in(ctx, cpuctx);
2142 /* Then walk through the lower prio flexible groups */
2143 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2144 ctx_flexible_sched_in(ctx, cpuctx);
2147 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2148 enum event_type_t event_type,
2149 struct task_struct *task)
2151 struct perf_event_context *ctx = &cpuctx->ctx;
2153 ctx_sched_in(ctx, cpuctx, event_type, task);
2156 static void perf_event_context_sched_in(struct perf_event_context *ctx,
2157 struct task_struct *task)
2159 struct perf_cpu_context *cpuctx;
2161 cpuctx = __get_cpu_context(ctx);
2162 if (cpuctx->task_ctx == ctx)
2165 perf_ctx_lock(cpuctx, ctx);
2166 perf_pmu_disable(ctx->pmu);
2168 * We want to keep the following priority order:
2169 * cpu pinned (that don't need to move), task pinned,
2170 * cpu flexible, task flexible.
2172 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2174 perf_event_sched_in(cpuctx, ctx, task);
2176 cpuctx->task_ctx = ctx;
2178 perf_pmu_enable(ctx->pmu);
2179 perf_ctx_unlock(cpuctx, ctx);
2182 * Since these rotations are per-cpu, we need to ensure the
2183 * cpu-context we got scheduled on is actually rotating.
2185 perf_pmu_rotate_start(ctx->pmu);
2189 * Called from scheduler to add the events of the current task
2190 * with interrupts disabled.
2192 * We restore the event value and then enable it.
2194 * This does not protect us against NMI, but enable()
2195 * sets the enabled bit in the control field of event _before_
2196 * accessing the event control register. If an NMI hits, then it will
2197 * keep the event running.
2199 void __perf_event_task_sched_in(struct task_struct *prev,
2200 struct task_struct *task)
2202 struct perf_event_context *ctx;
2205 for_each_task_context_nr(ctxn) {
2206 ctx = task->perf_event_ctxp[ctxn];
2210 perf_event_context_sched_in(ctx, task);
2213 * if cgroup events exist on this CPU, then we need
2214 * to check if we have to switch in PMU state.
2215 * cgroup events are in system-wide mode only
2217 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2218 perf_cgroup_sched_in(prev, task);
2221 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2223 u64 frequency = event->attr.sample_freq;
2224 u64 sec = NSEC_PER_SEC;
2225 u64 divisor, dividend;
2227 int count_fls, nsec_fls, frequency_fls, sec_fls;
2229 count_fls = fls64(count);
2230 nsec_fls = fls64(nsec);
2231 frequency_fls = fls64(frequency);
2235 * We got @count in @nsec, with a target of sample_freq HZ
2236 * the target period becomes:
2238 *             @count * 10^9
2239 * period = -------------------
2240 *          @nsec * sample_freq
2245 * Reduce accuracy by one bit such that @a and @b converge
2246 * to a similar magnitude.
2248 #define REDUCE_FLS(a, b) \
2250 if (a##_fls > b##_fls) { \
2260 * Reduce accuracy until either term fits in a u64, then proceed with
2261 * the other, so that finally we can do a u64/u64 division.
2263 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2264 REDUCE_FLS(nsec, frequency);
2265 REDUCE_FLS(sec, count);
2268 if (count_fls + sec_fls > 64) {
2269 divisor = nsec * frequency;
2271 while (count_fls + sec_fls > 64) {
2272 REDUCE_FLS(count, sec);
2276 dividend = count * sec;
2278 dividend = count * sec;
2280 while (nsec_fls + frequency_fls > 64) {
2281 REDUCE_FLS(nsec, frequency);
2285 divisor = nsec * frequency;
2291 return div64_u64(dividend, divisor);
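/*
 * Numeric example: 2,000,000 events observed in 10ms (nsec = 10^7) with
 * attr.sample_freq = 1000 gives
 *
 *	period = 2e6 * 1e9 / (1e7 * 1e3) = 200,000 events/sample,
 *
 * i.e. 200,000 * 1000 samples/sec matches the observed 2e8 events/sec.
 */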
2294 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
2296 struct hw_perf_event *hwc = &event->hw;
2297 s64 period, sample_period;
2300 period = perf_calculate_period(event, nsec, count);
2302 delta = (s64)(period - hwc->sample_period);
2303 delta = (delta + 7) / 8; /* low pass filter */
2305 sample_period = hwc->sample_period + delta;
2310 hwc->sample_period = sample_period;
2312 if (local64_read(&hwc->period_left) > 8*sample_period) {
2313 event->pmu->stop(event, PERF_EF_UPDATE);
2314 local64_set(&hwc->period_left, 0);
2315 event->pmu->start(event, PERF_EF_RELOAD);
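/*
 * E.g. if hwc->sample_period is 100,000 and the freshly computed period
 * is 180,000, then delta = 80,000 and the filtered step is
 * (80,000 + 7) / 8 = 10,000, so sample_period only moves to 110,000 on
 * this tick; repeated ticks converge it toward the target.
 */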
2319 static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
2321 struct perf_event *event;
2322 struct hw_perf_event *hwc;
2323 u64 interrupts, now;
2326 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2327 if (event->state != PERF_EVENT_STATE_ACTIVE)
2330 if (!event_filter_match(event))
2335 interrupts = hwc->interrupts;
2336 hwc->interrupts = 0;
2339 * unthrottle events on the tick
2341 if (interrupts == MAX_INTERRUPTS) {
2342 perf_log_throttle(event, 1);
2343 event->pmu->start(event, 0);
2346 if (!event->attr.freq || !event->attr.sample_freq)
2349 event->pmu->read(event);
2350 now = local64_read(&event->count);
2351 delta = now - hwc->freq_count_stamp;
2352 hwc->freq_count_stamp = now;
2355 perf_adjust_period(event, period, delta);
2360 * Round-robin a context's events:
2362 static void rotate_ctx(struct perf_event_context *ctx)
2365 * Rotate the first non-pinned group to the end of the list. Rotation
2366 * might be disabled by the inheritance code.
2368 if (!ctx->rotate_disable)
2369 list_rotate_left(&ctx->flexible_groups);
2373 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2374 * because they're strictly cpu affine and rotate_start is called with IRQs
2375 * disabled, while rotate_context is called from IRQ context.
2377 static void perf_rotate_context(struct perf_cpu_context *cpuctx)
2379 u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
2380 struct perf_event_context *ctx = NULL;
2381 int rotate = 0, remove = 1;
2383 if (cpuctx->ctx.nr_events) {
2385 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2389 ctx = cpuctx->task_ctx;
2390 if (ctx && ctx->nr_events) {
2392 if (ctx->nr_events != ctx->nr_active)
2396 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2397 perf_pmu_disable(cpuctx->ctx.pmu);
2398 perf_ctx_adjust_freq(&cpuctx->ctx, interval);
2400 perf_ctx_adjust_freq(ctx, interval);
2405 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2407 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2409 rotate_ctx(&cpuctx->ctx);
2413 perf_event_sched_in(cpuctx, ctx, current);
2417 list_del_init(&cpuctx->rotation_list);
2419 perf_pmu_enable(cpuctx->ctx.pmu);
2420 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2423 void perf_event_task_tick(void)
2425 struct list_head *head = &__get_cpu_var(rotation_list);
2426 struct perf_cpu_context *cpuctx, *tmp;
2428 WARN_ON(!irqs_disabled());
2430 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2431 if (cpuctx->jiffies_interval == 1 ||
2432 !(jiffies % cpuctx->jiffies_interval))
2433 perf_rotate_context(cpuctx);
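* E.g. a PMU with jiffies_interval == 4 only has its context rotated
* on every fourth tick.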
2437 static int event_enable_on_exec(struct perf_event *event,
2438 struct perf_event_context *ctx)
2440 if (!event->attr.enable_on_exec)
2443 event->attr.enable_on_exec = 0;
2444 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2447 __perf_event_mark_enabled(event, ctx);
2453 * Enable all of a task's events that have been marked enable-on-exec.
2454 * This expects task == current.
2456 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2458 struct perf_event *event;
2459 unsigned long flags;
2463 local_irq_save(flags);
2464 if (!ctx || !ctx->nr_events)
2468 * We must ctxsw out cgroup events to avoid conflict
2469 * when invoking perf_task_event_sched_in() later on
2470 * in this function. Otherwise we end up trying to
2471 * ctxswin cgroup events which are already scheduled
2474 perf_cgroup_sched_out(current, NULL);
2476 raw_spin_lock(&ctx->lock);
2477 task_ctx_sched_out(ctx);
2479 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2480 ret = event_enable_on_exec(event, ctx);
2485 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2486 ret = event_enable_on_exec(event, ctx);
2492 * Unclone this context if we enabled any event.
2497 raw_spin_unlock(&ctx->lock);
2500 * Also calls ctxswin for cgroup events, if any:
2502 perf_event_context_sched_in(ctx, ctx->task);
2504 local_irq_restore(flags);
2508 * Cross CPU call to read the hardware event
2510 static void __perf_event_read(void *info)
2512 struct perf_event *event = info;
2513 struct perf_event_context *ctx = event->ctx;
2514 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2517 * If this is a task context, we need to check whether it is
2518 * the current task context of this cpu. If not it has been
2519 * scheduled out before the smp call arrived. In that case
2520 * event->count would have been updated to a recent sample
2521 * when the event was scheduled out.
2523 if (ctx->task && cpuctx->task_ctx != ctx)
2526 raw_spin_lock(&ctx->lock);
2527 if (ctx->is_active) {
2528 update_context_time(ctx);
2529 update_cgrp_time_from_event(event);
2531 update_event_times(event);
2532 if (event->state == PERF_EVENT_STATE_ACTIVE)
2533 event->pmu->read(event);
2534 raw_spin_unlock(&ctx->lock);
2537 static inline u64 perf_event_count(struct perf_event *event)
2539 return local64_read(&event->count) + atomic64_read(&event->child_count);
2542 static u64 perf_event_read(struct perf_event *event)
2545 * If event is enabled and currently active on a CPU, update the
2546 * value in the event structure:
2548 if (event->state == PERF_EVENT_STATE_ACTIVE) {
2549 smp_call_function_single(event->oncpu,
2550 __perf_event_read, event, 1);
2551 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2552 struct perf_event_context *ctx = event->ctx;
2553 unsigned long flags;
2555 raw_spin_lock_irqsave(&ctx->lock, flags);
2557 * may read while context is not active
2558 * (e.g., thread is blocked), in that case
2559 * we cannot update context time
2561 if (ctx->is_active) {
2562 update_context_time(ctx);
2563 update_cgrp_time_from_event(event);
2565 update_event_times(event);
2566 raw_spin_unlock_irqrestore(&ctx->lock, flags);
2569 return perf_event_count(event);
2573 * Initialize the perf_event context in a task_struct:
2575 static void __perf_event_init_context(struct perf_event_context *ctx)
2577 raw_spin_lock_init(&ctx->lock);
2578 mutex_init(&ctx->mutex);
2579 INIT_LIST_HEAD(&ctx->pinned_groups);
2580 INIT_LIST_HEAD(&ctx->flexible_groups);
2581 INIT_LIST_HEAD(&ctx->event_list);
2582 atomic_set(&ctx->refcount, 1);
2585 static struct perf_event_context *
2586 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2588 struct perf_event_context *ctx;
2590 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2594 __perf_event_init_context(ctx);
2597 get_task_struct(task);
2604 static struct task_struct *
2605 find_lively_task_by_vpid(pid_t vpid)
2607 struct task_struct *task;
2614 task = find_task_by_vpid(vpid);
2616 get_task_struct(task);
2620 return ERR_PTR(-ESRCH);
2622 /* Reuse ptrace permission checks for now. */
2624 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2629 put_task_struct(task);
2630 return ERR_PTR(err);
2635 * Returns a matching context with refcount and pincount.
2637 static struct perf_event_context *
2638 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
2640 struct perf_event_context *ctx;
2641 struct perf_cpu_context *cpuctx;
2642 unsigned long flags;
2646 /* Must be root to operate on a CPU event: */
2647 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2648 return ERR_PTR(-EACCES);
2651 * We could be clever and allow attaching an event to an
2652 * offline CPU and activate it when the CPU comes up, but
2655 if (!cpu_online(cpu))
2656 return ERR_PTR(-ENODEV);
2658 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
2667 ctxn = pmu->task_ctx_nr;
2672 ctx = perf_lock_task_context(task, ctxn, &flags);
2676 raw_spin_unlock_irqrestore(&ctx->lock, flags);
2678 ctx = alloc_perf_context(pmu, task);
2684 mutex_lock(&task->perf_event_mutex);
2686 * If it has already passed perf_event_exit_task(),
2687 * we must see PF_EXITING; it takes this mutex too.
2689 if (task->flags & PF_EXITING)
2691 else if (task->perf_event_ctxp[ctxn])
2696 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
2698 mutex_unlock(&task->perf_event_mutex);
2700 if (unlikely(err)) {
2712 return ERR_PTR(err);
2715 static void perf_event_free_filter(struct perf_event *event);
2717 static void free_event_rcu(struct rcu_head *head)
2719 struct perf_event *event;
2721 event = container_of(head, struct perf_event, rcu_head);
2723 put_pid_ns(event->ns);
2724 perf_event_free_filter(event);
2728 static void ring_buffer_put(struct ring_buffer *rb);
2730 static void free_event(struct perf_event *event)
2732 irq_work_sync(&event->pending);
2734 if (!event->parent) {
2735 if (event->attach_state & PERF_ATTACH_TASK)
2736 jump_label_dec(&perf_sched_events);
2737 if (event->attr.mmap || event->attr.mmap_data)
2738 atomic_dec(&nr_mmap_events);
2739 if (event->attr.comm)
2740 atomic_dec(&nr_comm_events);
2741 if (event->attr.task)
2742 atomic_dec(&nr_task_events);
2743 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2744 put_callchain_buffers();
2745 if (is_cgroup_event(event)) {
2746 atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
2747 jump_label_dec(&perf_sched_events);
2752 ring_buffer_put(event->rb);
2756 if (is_cgroup_event(event))
2757 perf_detach_cgroup(event);
2760 event->destroy(event);
2763 put_ctx(event->ctx);
2765 call_rcu(&event->rcu_head, free_event_rcu);
2768 int perf_event_release_kernel(struct perf_event *event)
2770 struct perf_event_context *ctx = event->ctx;
2772 WARN_ON_ONCE(ctx->parent_ctx);
2774 * There are two ways this annotation is useful:
2776 * 1) there is a lock recursion from perf_event_exit_task
2777 * see the comment there.
2779 * 2) there is a lock-inversion with mmap_sem through
2780 * perf_event_read_group(), which takes faults while
2781 * holding ctx->mutex, however this is called after
2782 * the last filedesc died, so there is no possibility
2783 * to trigger the AB-BA case.
2785 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2786 raw_spin_lock_irq(&ctx->lock);
2787 perf_group_detach(event);
2788 raw_spin_unlock_irq(&ctx->lock);
2789 perf_remove_from_context(event);
2790 mutex_unlock(&ctx->mutex);
2796 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2799 * Called when the last reference to the file is gone.
2801 static int perf_release(struct inode *inode, struct file *file)
2803 struct perf_event *event = file->private_data;
2804 struct task_struct *owner;
2806 file->private_data = NULL;
2809 owner = ACCESS_ONCE(event->owner);
2811 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2812 * !owner it means the list deletion is complete and we can indeed
2813 * free this event, otherwise we need to serialize on
2814 * owner->perf_event_mutex.
2816 smp_read_barrier_depends();
2819 * Since delayed_put_task_struct() also drops the last
2820 * task reference we can safely take a new reference
2821 * while holding the rcu_read_lock().
2823 get_task_struct(owner);
2828 mutex_lock(&owner->perf_event_mutex);
2830 * We have to re-check the event->owner field: if it is cleared
2831 * we raced with perf_event_exit_task(); acquiring the mutex
2832 * ensured they're done, and we can proceed with freeing the event.
2836 list_del_init(&event->owner_entry);
2837 mutex_unlock(&owner->perf_event_mutex);
2838 put_task_struct(owner);
2841 return perf_event_release_kernel(event);
2844 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
2846 struct perf_event *child;
2852 mutex_lock(&event->child_mutex);
2853 total += perf_event_read(event);
2854 *enabled += event->total_time_enabled +
2855 atomic64_read(&event->child_total_time_enabled);
2856 *running += event->total_time_running +
2857 atomic64_read(&event->child_total_time_running);
2859 list_for_each_entry(child, &event->child_list, child_list) {
2860 total += perf_event_read(child);
2861 *enabled += child->total_time_enabled;
2862 *running += child->total_time_running;
2864 mutex_unlock(&event->child_mutex);
2868 EXPORT_SYMBOL_GPL(perf_event_read_value);
2870 static int perf_event_read_group(struct perf_event *event,
2871 u64 read_format, char __user *buf)
2873 struct perf_event *leader = event->group_leader, *sub;
2874 int n = 0, size = 0, ret = -EFAULT;
2875 struct perf_event_context *ctx = leader->ctx;
2877 u64 count, enabled, running;
2879 mutex_lock(&ctx->mutex);
2880 count = perf_event_read_value(leader, &enabled, &running);
2882 values[n++] = 1 + leader->nr_siblings;
2883 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2884 values[n++] = enabled;
2885 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2886 values[n++] = running;
2887 values[n++] = count;
2888 if (read_format & PERF_FORMAT_ID)
2889 values[n++] = primary_event_id(leader);
2891 size = n * sizeof(u64);
2893 if (copy_to_user(buf, values, size))
2898 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2901 values[n++] = perf_event_read_value(sub, &enabled, &running);
2902 if (read_format & PERF_FORMAT_ID)
2903 values[n++] = primary_event_id(sub);
2905 size = n * sizeof(u64);
2907 if (copy_to_user(buf + ret, values, size)) {
2915 mutex_unlock(&ctx->mutex);
2920 static int perf_event_read_one(struct perf_event *event,
2921 u64 read_format, char __user *buf)
2923 u64 enabled, running;
2927 values[n++] = perf_event_read_value(event, &enabled, &running);
2928 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2929 values[n++] = enabled;
2930 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2931 values[n++] = running;
2932 if (read_format & PERF_FORMAT_ID)
2933 values[n++] = primary_event_id(event);
2935 if (copy_to_user(buf, values, n * sizeof(u64)))
2938 return n * sizeof(u64);
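*
* The read() buffer for the non-group case is therefore a sequence of
* u64 values, mirroring the code above:
*
*   u64 value;
*   u64 time_enabled;   (only if PERF_FORMAT_TOTAL_TIME_ENABLED)
*   u64 time_running;   (only if PERF_FORMAT_TOTAL_TIME_RUNNING)
*   u64 id;             (only if PERF_FORMAT_ID)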
2942 * Read the performance event - simple non-blocking version for now
2945 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2947 u64 read_format = event->attr.read_format;
2951 * Return end-of-file for a read on an event that is in
2952 * error state (i.e. because it was pinned but it couldn't be
2953 * scheduled on to the CPU at some point).
2955 if (event->state == PERF_EVENT_STATE_ERROR)
2958 if (count < event->read_size)
2961 WARN_ON_ONCE(event->ctx->parent_ctx);
2962 if (read_format & PERF_FORMAT_GROUP)
2963 ret = perf_event_read_group(event, read_format, buf);
2965 ret = perf_event_read_one(event, read_format, buf);
2971 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2973 struct perf_event *event = file->private_data;
2975 return perf_read_hw(event, buf, count);
2978 static unsigned int perf_poll(struct file *file, poll_table *wait)
2980 struct perf_event *event = file->private_data;
2981 struct ring_buffer *rb;
2982 unsigned int events = POLL_HUP;
2985 rb = rcu_dereference(event->rb);
2987 events = atomic_xchg(&rb->poll, 0);
2990 poll_wait(file, &event->waitq, wait);
2995 static void perf_event_reset(struct perf_event *event)
2997 (void)perf_event_read(event);
2998 local64_set(&event->count, 0);
2999 perf_event_update_userpage(event);
3003 * Holding the top-level event's child_mutex means that any
3004 * descendant process that has inherited this event will block
3005 * in sync_child_event if it goes to exit, thus satisfying the
3006 * task existence requirements of perf_event_enable/disable.
3008 static void perf_event_for_each_child(struct perf_event *event,
3009 void (*func)(struct perf_event *))
3011 struct perf_event *child;
3013 WARN_ON_ONCE(event->ctx->parent_ctx);
3014 mutex_lock(&event->child_mutex);
3016 list_for_each_entry(child, &event->child_list, child_list)
3018 mutex_unlock(&event->child_mutex);
3021 static void perf_event_for_each(struct perf_event *event,
3022 void (*func)(struct perf_event *))
3024 struct perf_event_context *ctx = event->ctx;
3025 struct perf_event *sibling;
3027 WARN_ON_ONCE(ctx->parent_ctx);
3028 mutex_lock(&ctx->mutex);
3029 event = event->group_leader;
3031 perf_event_for_each_child(event, func);
3033 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3034 perf_event_for_each_child(event, func);
3035 mutex_unlock(&ctx->mutex);
3038 static int perf_event_period(struct perf_event *event, u64 __user *arg)
3040 struct perf_event_context *ctx = event->ctx;
3044 if (!is_sampling_event(event))
3047 if (copy_from_user(&value, arg, sizeof(value)))
3053 raw_spin_lock_irq(&ctx->lock);
3054 if (event->attr.freq) {
3055 if (value > sysctl_perf_event_sample_rate) {
3060 event->attr.sample_freq = value;
3062 event->attr.sample_period = value;
3063 event->hw.sample_period = value;
3066 raw_spin_unlock_irq(&ctx->lock);
3071 static const struct file_operations perf_fops;
3073 static struct perf_event *perf_fget_light(int fd, int *fput_needed)
3077 file = fget_light(fd, fput_needed);
3079 return ERR_PTR(-EBADF);
3081 if (file->f_op != &perf_fops) {
3082 fput_light(file, *fput_needed);
3084 return ERR_PTR(-EBADF);
3087 return file->private_data;
3090 static int perf_event_set_output(struct perf_event *event,
3091 struct perf_event *output_event);
3092 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3094 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3096 struct perf_event *event = file->private_data;
3097 void (*func)(struct perf_event *);
3101 case PERF_EVENT_IOC_ENABLE:
3102 func = perf_event_enable;
3104 case PERF_EVENT_IOC_DISABLE:
3105 func = perf_event_disable;
3107 case PERF_EVENT_IOC_RESET:
3108 func = perf_event_reset;
3111 case PERF_EVENT_IOC_REFRESH:
3112 return perf_event_refresh(event, arg);
3114 case PERF_EVENT_IOC_PERIOD:
3115 return perf_event_period(event, (u64 __user *)arg);
3117 case PERF_EVENT_IOC_SET_OUTPUT:
3119 struct perf_event *output_event = NULL;
3120 int fput_needed = 0;
3124 output_event = perf_fget_light(arg, &fput_needed);
3125 if (IS_ERR(output_event))
3126 return PTR_ERR(output_event);
3129 ret = perf_event_set_output(event, output_event);
3131 fput_light(output_event->filp, fput_needed);
3136 case PERF_EVENT_IOC_SET_FILTER:
3137 return perf_event_set_filter(event, (void __user *)arg);
3143 if (flags & PERF_IOC_FLAG_GROUP)
3144 perf_event_for_each(event, func);
3146 perf_event_for_each_child(event, func);
3151 int perf_event_task_enable(void)
3153 struct perf_event *event;
3155 mutex_lock(&current->perf_event_mutex);
3156 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3157 perf_event_for_each_child(event, perf_event_enable);
3158 mutex_unlock(&current->perf_event_mutex);
3163 int perf_event_task_disable(void)
3165 struct perf_event *event;
3167 mutex_lock(&current->perf_event_mutex);
3168 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3169 perf_event_for_each_child(event, perf_event_disable);
3170 mutex_unlock(&current->perf_event_mutex);
3175 #ifndef PERF_EVENT_INDEX_OFFSET
3176 # define PERF_EVENT_INDEX_OFFSET 0
3179 static int perf_event_index(struct perf_event *event)
3181 if (event->hw.state & PERF_HES_STOPPED)
3184 if (event->state != PERF_EVENT_STATE_ACTIVE)
3187 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
3190 static void calc_timer_values(struct perf_event *event,
3197 ctx_time = event->shadow_ctx_time + now;
3198 *enabled = ctx_time - event->tstamp_enabled;
3199 *running = ctx_time - event->tstamp_running;
3203 * Callers need to ensure there can be no nesting of this function, otherwise
3204 * the seqlock logic goes bad. We cannot serialize this because the arch
3205 * code calls this from NMI context.
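*
* A user-space reader of the mmap()ed control page is expected to pair
* with this update roughly as follows (sketch; see the
* perf_event_mmap_page comments in include/linux/perf_event.h):
*
*   do {
*     seq = pc->lock;
*     barrier();
*     ... read pc->index, pc->offset, pc->time_enabled, ...
*     barrier();
*   } while (pc->lock != seq);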
3207 void perf_event_update_userpage(struct perf_event *event)
3209 struct perf_event_mmap_page *userpg;
3210 struct ring_buffer *rb;
3211 u64 enabled, running;
3215 * compute total_time_enabled, total_time_running
3216 * based on snapshot values taken when the event
3217 * was last scheduled in.
3219 * we cannot simply call update_context_time()
3220 * because of a locking issue, as we can be called in NMI context.
3223 calc_timer_values(event, &enabled, &running);
3224 rb = rcu_dereference(event->rb);
3228 userpg = rb->user_page;
3231 * Disable preemption so as to not let the corresponding user-space
3232 * spin too long if we get preempted.
3237 userpg->index = perf_event_index(event);
3238 userpg->offset = perf_event_count(event);
3239 if (event->state == PERF_EVENT_STATE_ACTIVE)
3240 userpg->offset -= local64_read(&event->hw.prev_count);
3242 userpg->time_enabled = enabled +
3243 atomic64_read(&event->child_total_time_enabled);
3245 userpg->time_running = running +
3246 atomic64_read(&event->child_total_time_running);
3255 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3257 struct perf_event *event = vma->vm_file->private_data;
3258 struct ring_buffer *rb;
3259 int ret = VM_FAULT_SIGBUS;
3261 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3262 if (vmf->pgoff == 0)
3268 rb = rcu_dereference(event->rb);
3272 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3275 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3279 get_page(vmf->page);
3280 vmf->page->mapping = vma->vm_file->f_mapping;
3281 vmf->page->index = vmf->pgoff;
3290 static void rb_free_rcu(struct rcu_head *rcu_head)
3292 struct ring_buffer *rb;
3294 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3298 static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3300 struct ring_buffer *rb;
3303 rb = rcu_dereference(event->rb);
3305 if (!atomic_inc_not_zero(&rb->refcount))
3313 static void ring_buffer_put(struct ring_buffer *rb)
3315 if (!atomic_dec_and_test(&rb->refcount))
3318 call_rcu(&rb->rcu_head, rb_free_rcu);
3321 static void perf_mmap_open(struct vm_area_struct *vma)
3323 struct perf_event *event = vma->vm_file->private_data;
3325 atomic_inc(&event->mmap_count);
3328 static void perf_mmap_close(struct vm_area_struct *vma)
3330 struct perf_event *event = vma->vm_file->private_data;
3332 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
3333 unsigned long size = perf_data_size(event->rb);
3334 struct user_struct *user = event->mmap_user;
3335 struct ring_buffer *rb = event->rb;
3337 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
3338 vma->vm_mm->pinned_vm -= event->mmap_locked;
3339 rcu_assign_pointer(event->rb, NULL);
3340 mutex_unlock(&event->mmap_mutex);
3342 ring_buffer_put(rb);
3347 static const struct vm_operations_struct perf_mmap_vmops = {
3348 .open = perf_mmap_open,
3349 .close = perf_mmap_close,
3350 .fault = perf_mmap_fault,
3351 .page_mkwrite = perf_mmap_fault,
3354 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3356 struct perf_event *event = file->private_data;
3357 unsigned long user_locked, user_lock_limit;
3358 struct user_struct *user = current_user();
3359 unsigned long locked, lock_limit;
3360 struct ring_buffer *rb;
3361 unsigned long vma_size;
3362 unsigned long nr_pages;
3363 long user_extra, extra;
3364 int ret = 0, flags = 0;
3367 * Don't allow mmap() of inherited per-task counters. This would
3368 * create a performance issue due to all children writing to the
3371 if (event->cpu == -1 && event->attr.inherit)
3374 if (!(vma->vm_flags & VM_SHARED))
3377 vma_size = vma->vm_end - vma->vm_start;
3378 nr_pages = (vma_size / PAGE_SIZE) - 1;
3381 * If we have rb pages ensure they're a power-of-two number, so we
3382 * can do bitmasks instead of modulo.
3384 if (nr_pages != 0 && !is_power_of_2(nr_pages))
3387 if (vma_size != PAGE_SIZE * (1 + nr_pages))
3390 if (vma->vm_pgoff != 0)
3393 WARN_ON_ONCE(event->ctx->parent_ctx);
3394 mutex_lock(&event->mmap_mutex);
3396 if (event->rb->nr_pages == nr_pages)
3397 atomic_inc(&event->rb->refcount);
3403 user_extra = nr_pages + 1;
3404 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3407 * Increase the limit linearly with more CPUs:
3409 user_lock_limit *= num_online_cpus();
3411 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3414 if (user_locked > user_lock_limit)
3415 extra = user_locked - user_lock_limit;
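* Worked example, assuming 4KiB pages and the default
* sysctl_perf_event_mlock of 512KiB plus one page: user_lock_limit is
* 516 >> 2 = 129 pages per user, scaled above by num_online_cpus();
* only the pages beyond that ("extra") are charged against
* RLIMIT_MEMLOCK below.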
3417 lock_limit = rlimit(RLIMIT_MEMLOCK);
3418 lock_limit >>= PAGE_SHIFT;
3419 locked = vma->vm_mm->pinned_vm + extra;
3421 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3422 !capable(CAP_IPC_LOCK)) {
3429 if (vma->vm_flags & VM_WRITE)
3430 flags |= RING_BUFFER_WRITABLE;
3432 rb = rb_alloc(nr_pages,
3433 event->attr.watermark ? event->attr.wakeup_watermark : 0,
3440 rcu_assign_pointer(event->rb, rb);
3442 atomic_long_add(user_extra, &user->locked_vm);
3443 event->mmap_locked = extra;
3444 event->mmap_user = get_current_user();
3445 vma->vm_mm->pinned_vm += event->mmap_locked;
3449 atomic_inc(&event->mmap_count);
3450 mutex_unlock(&event->mmap_mutex);
3452 vma->vm_flags |= VM_RESERVED;
3453 vma->vm_ops = &perf_mmap_vmops;
3458 static int perf_fasync(int fd, struct file *filp, int on)
3460 struct inode *inode = filp->f_path.dentry->d_inode;
3461 struct perf_event *event = filp->private_data;
3464 mutex_lock(&inode->i_mutex);
3465 retval = fasync_helper(fd, filp, on, &event->fasync);
3466 mutex_unlock(&inode->i_mutex);
3474 static const struct file_operations perf_fops = {
3475 .llseek = no_llseek,
3476 .release = perf_release,
3479 .unlocked_ioctl = perf_ioctl,
3480 .compat_ioctl = perf_ioctl,
3482 .fasync = perf_fasync,
3488 * If there's data, ensure we set the poll() state and publish everything
3489 * to user-space before waking everybody up.
3492 void perf_event_wakeup(struct perf_event *event)
3494 wake_up_all(&event->waitq);
3496 if (event->pending_kill) {
3497 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3498 event->pending_kill = 0;
3502 static void perf_pending_event(struct irq_work *entry)
3504 struct perf_event *event = container_of(entry,
3505 struct perf_event, pending);
3507 if (event->pending_disable) {
3508 event->pending_disable = 0;
3509 __perf_event_disable(event);
3512 if (event->pending_wakeup) {
3513 event->pending_wakeup = 0;
3514 perf_event_wakeup(event);
3519 * We assume there is only KVM supporting the callbacks.
3520 * Later on, we might change it to a list if there is
3521 * another virtualization implementation supporting the callbacks.
3523 struct perf_guest_info_callbacks *perf_guest_cbs;
3525 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3527 perf_guest_cbs = cbs;
3530 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3532 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3534 perf_guest_cbs = NULL;
3537 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3539 static void __perf_event_header__init_id(struct perf_event_header *header,
3540 struct perf_sample_data *data,
3541 struct perf_event *event)
3543 u64 sample_type = event->attr.sample_type;
3545 data->type = sample_type;
3546 header->size += event->id_header_size;
3548 if (sample_type & PERF_SAMPLE_TID) {
3549 /* namespace issues */
3550 data->tid_entry.pid = perf_event_pid(event, current);
3551 data->tid_entry.tid = perf_event_tid(event, current);
3554 if (sample_type & PERF_SAMPLE_TIME)
3555 data->time = perf_clock();
3557 if (sample_type & PERF_SAMPLE_ID)
3558 data->id = primary_event_id(event);
3560 if (sample_type & PERF_SAMPLE_STREAM_ID)
3561 data->stream_id = event->id;
3563 if (sample_type & PERF_SAMPLE_CPU) {
3564 data->cpu_entry.cpu = raw_smp_processor_id();
3565 data->cpu_entry.reserved = 0;
3569 void perf_event_header__init_id(struct perf_event_header *header,
3570 struct perf_sample_data *data,
3571 struct perf_event *event)
3573 if (event->attr.sample_id_all)
3574 __perf_event_header__init_id(header, data, event);
3577 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3578 struct perf_sample_data *data)
3580 u64 sample_type = data->type;
3582 if (sample_type & PERF_SAMPLE_TID)
3583 perf_output_put(handle, data->tid_entry);
3585 if (sample_type & PERF_SAMPLE_TIME)
3586 perf_output_put(handle, data->time);
3588 if (sample_type & PERF_SAMPLE_ID)
3589 perf_output_put(handle, data->id);
3591 if (sample_type & PERF_SAMPLE_STREAM_ID)
3592 perf_output_put(handle, data->stream_id);
3594 if (sample_type & PERF_SAMPLE_CPU)
3595 perf_output_put(handle, data->cpu_entry);
3598 void perf_event__output_id_sample(struct perf_event *event,
3599 struct perf_output_handle *handle,
3600 struct perf_sample_data *sample)
3602 if (event->attr.sample_id_all)
3603 __perf_event__output_id_sample(handle, sample);
3606 static void perf_output_read_one(struct perf_output_handle *handle,
3607 struct perf_event *event,
3608 u64 enabled, u64 running)
3610 u64 read_format = event->attr.read_format;
3614 values[n++] = perf_event_count(event);
3615 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3616 values[n++] = enabled +
3617 atomic64_read(&event->child_total_time_enabled);
3619 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3620 values[n++] = running +
3621 atomic64_read(&event->child_total_time_running);
3623 if (read_format & PERF_FORMAT_ID)
3624 values[n++] = primary_event_id(event);
3626 __output_copy(handle, values, n * sizeof(u64));
3630 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3632 static void perf_output_read_group(struct perf_output_handle *handle,
3633 struct perf_event *event,
3634 u64 enabled, u64 running)
3636 struct perf_event *leader = event->group_leader, *sub;
3637 u64 read_format = event->attr.read_format;
3641 values[n++] = 1 + leader->nr_siblings;
3643 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3644 values[n++] = enabled;
3646 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3647 values[n++] = running;
3649 if (leader != event)
3650 leader->pmu->read(leader);
3652 values[n++] = perf_event_count(leader);
3653 if (read_format & PERF_FORMAT_ID)
3654 values[n++] = primary_event_id(leader);
3656 __output_copy(handle, values, n * sizeof(u64));
3658 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3662 sub->pmu->read(sub);
3664 values[n++] = perf_event_count(sub);
3665 if (read_format & PERF_FORMAT_ID)
3666 values[n++] = primary_event_id(sub);
3668 __output_copy(handle, values, n * sizeof(u64));
3672 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3673 PERF_FORMAT_TOTAL_TIME_RUNNING)
3675 static void perf_output_read(struct perf_output_handle *handle,
3676 struct perf_event *event)
3678 u64 enabled = 0, running = 0;
3679 u64 read_format = event->attr.read_format;
3682 * compute total_time_enabled, total_time_running
3683 * based on snapshot values taken when the event
3684 * was last scheduled in.
3686 * we cannot simply call update_context_time()
3687 * because of a locking issue, as we are called in NMI context.
3690 if (read_format & PERF_FORMAT_TOTAL_TIMES)
3691 calc_timer_values(event, &enabled, &running);
3693 if (event->attr.read_format & PERF_FORMAT_GROUP)
3694 perf_output_read_group(handle, event, enabled, running);
3696 perf_output_read_one(handle, event, enabled, running);
3699 void perf_output_sample(struct perf_output_handle *handle,
3700 struct perf_event_header *header,
3701 struct perf_sample_data *data,
3702 struct perf_event *event)
3704 u64 sample_type = data->type;
3706 perf_output_put(handle, *header);
3708 if (sample_type & PERF_SAMPLE_IP)
3709 perf_output_put(handle, data->ip);
3711 if (sample_type & PERF_SAMPLE_TID)
3712 perf_output_put(handle, data->tid_entry);
3714 if (sample_type & PERF_SAMPLE_TIME)
3715 perf_output_put(handle, data->time);
3717 if (sample_type & PERF_SAMPLE_ADDR)
3718 perf_output_put(handle, data->addr);
3720 if (sample_type & PERF_SAMPLE_ID)
3721 perf_output_put(handle, data->id);
3723 if (sample_type & PERF_SAMPLE_STREAM_ID)
3724 perf_output_put(handle, data->stream_id);
3726 if (sample_type & PERF_SAMPLE_CPU)
3727 perf_output_put(handle, data->cpu_entry);
3729 if (sample_type & PERF_SAMPLE_PERIOD)
3730 perf_output_put(handle, data->period);
3732 if (sample_type & PERF_SAMPLE_READ)
3733 perf_output_read(handle, event);
3735 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3736 if (data->callchain) {
3739 if (data->callchain)
3740 size += data->callchain->nr;
3742 size *= sizeof(u64);
3744 __output_copy(handle, data->callchain, size);
3747 perf_output_put(handle, nr);
3751 if (sample_type & PERF_SAMPLE_RAW) {
3753 perf_output_put(handle, data->raw->size);
3754 __output_copy(handle, data->raw->data,
3761 .size = sizeof(u32),
3764 perf_output_put(handle, raw);
3768 if (!event->attr.watermark) {
3769 int wakeup_events = event->attr.wakeup_events;
3771 if (wakeup_events) {
3772 struct ring_buffer *rb = handle->rb;
3773 int events = local_inc_return(&rb->events);
3775 if (events >= wakeup_events) {
3776 local_sub(wakeup_events, &rb->events);
3777 local_inc(&rb->wakeup);
3783 void perf_prepare_sample(struct perf_event_header *header,
3784 struct perf_sample_data *data,
3785 struct perf_event *event,
3786 struct pt_regs *regs)
3788 u64 sample_type = event->attr.sample_type;
3790 header->type = PERF_RECORD_SAMPLE;
3791 header->size = sizeof(*header) + event->header_size;
3794 header->misc |= perf_misc_flags(regs);
3796 __perf_event_header__init_id(header, data, event);
3798 if (sample_type & PERF_SAMPLE_IP)
3799 data->ip = perf_instruction_pointer(regs);
3801 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3804 data->callchain = perf_callchain(regs);
3806 if (data->callchain)
3807 size += data->callchain->nr;
3809 header->size += size * sizeof(u64);
3812 if (sample_type & PERF_SAMPLE_RAW) {
3813 int size = sizeof(u32);
3816 size += data->raw->size;
3818 size += sizeof(u32);
3820 WARN_ON_ONCE(size & (sizeof(u64)-1));
3821 header->size += size;
3825 static void perf_event_output(struct perf_event *event,
3826 struct perf_sample_data *data,
3827 struct pt_regs *regs)
3829 struct perf_output_handle handle;
3830 struct perf_event_header header;
3832 /* protect the callchain buffers */
3835 perf_prepare_sample(&header, data, event, regs);
3837 if (perf_output_begin(&handle, event, header.size))
3840 perf_output_sample(&handle, &header, data, event);
3842 perf_output_end(&handle);
3852 struct perf_read_event {
3853 struct perf_event_header header;
3860 perf_event_read_event(struct perf_event *event,
3861 struct task_struct *task)
3863 struct perf_output_handle handle;
3864 struct perf_sample_data sample;
3865 struct perf_read_event read_event = {
3867 .type = PERF_RECORD_READ,
3869 .size = sizeof(read_event) + event->read_size,
3871 .pid = perf_event_pid(event, task),
3872 .tid = perf_event_tid(event, task),
3876 perf_event_header__init_id(&read_event.header, &sample, event);
3877 ret = perf_output_begin(&handle, event, read_event.header.size);
3881 perf_output_put(&handle, read_event);
3882 perf_output_read(&handle, event);
3883 perf_event__output_id_sample(event, &handle, &sample);
3885 perf_output_end(&handle);
3889 * task tracking -- fork/exit
3891 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
3894 struct perf_task_event {
3895 struct task_struct *task;
3896 struct perf_event_context *task_ctx;
3899 struct perf_event_header header;
3909 static void perf_event_task_output(struct perf_event *event,
3910 struct perf_task_event *task_event)
3912 struct perf_output_handle handle;
3913 struct perf_sample_data sample;
3914 struct task_struct *task = task_event->task;
3915 int ret, size = task_event->event_id.header.size;
3917 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
3919 ret = perf_output_begin(&handle, event,
3920 task_event->event_id.header.size);
3924 task_event->event_id.pid = perf_event_pid(event, task);
3925 task_event->event_id.ppid = perf_event_pid(event, current);
3927 task_event->event_id.tid = perf_event_tid(event, task);
3928 task_event->event_id.ptid = perf_event_tid(event, current);
3930 perf_output_put(&handle, task_event->event_id);
3932 perf_event__output_id_sample(event, &handle, &sample);
3934 perf_output_end(&handle);
3936 task_event->event_id.header.size = size;
3939 static int perf_event_task_match(struct perf_event *event)
3941 if (event->state < PERF_EVENT_STATE_INACTIVE)
3944 if (!event_filter_match(event))
3947 if (event->attr.comm || event->attr.mmap ||
3948 event->attr.mmap_data || event->attr.task)
3954 static void perf_event_task_ctx(struct perf_event_context *ctx,
3955 struct perf_task_event *task_event)
3957 struct perf_event *event;
3959 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3960 if (perf_event_task_match(event))
3961 perf_event_task_output(event, task_event);
3965 static void perf_event_task_event(struct perf_task_event *task_event)
3967 struct perf_cpu_context *cpuctx;
3968 struct perf_event_context *ctx;
3973 list_for_each_entry_rcu(pmu, &pmus, entry) {
3974 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
3975 if (cpuctx->active_pmu != pmu)
3977 perf_event_task_ctx(&cpuctx->ctx, task_event);
3979 ctx = task_event->task_ctx;
3981 ctxn = pmu->task_ctx_nr;
3984 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
3987 perf_event_task_ctx(ctx, task_event);
3989 put_cpu_ptr(pmu->pmu_cpu_context);
3994 static void perf_event_task(struct task_struct *task,
3995 struct perf_event_context *task_ctx,
3998 struct perf_task_event task_event;
4000 if (!atomic_read(&nr_comm_events) &&
4001 !atomic_read(&nr_mmap_events) &&
4002 !atomic_read(&nr_task_events))
4005 task_event = (struct perf_task_event){
4007 .task_ctx = task_ctx,
4010 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4012 .size = sizeof(task_event.event_id),
4018 .time = perf_clock(),
4022 perf_event_task_event(&task_event);
4025 void perf_event_fork(struct task_struct *task)
4027 perf_event_task(task, NULL, 1);
4034 struct perf_comm_event {
4035 struct task_struct *task;
4040 struct perf_event_header header;
4047 static void perf_event_comm_output(struct perf_event *event,
4048 struct perf_comm_event *comm_event)
4050 struct perf_output_handle handle;
4051 struct perf_sample_data sample;
4052 int size = comm_event->event_id.header.size;
4055 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4056 ret = perf_output_begin(&handle, event,
4057 comm_event->event_id.header.size);
4062 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4063 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4065 perf_output_put(&handle, comm_event->event_id);
4066 __output_copy(&handle, comm_event->comm,
4067 comm_event->comm_size);
4069 perf_event__output_id_sample(event, &handle, &sample);
4071 perf_output_end(&handle);
4073 comm_event->event_id.header.size = size;
4076 static int perf_event_comm_match(struct perf_event *event)
4078 if (event->state < PERF_EVENT_STATE_INACTIVE)
4081 if (!event_filter_match(event))
4084 if (event->attr.comm)
4090 static void perf_event_comm_ctx(struct perf_event_context *ctx,
4091 struct perf_comm_event *comm_event)
4093 struct perf_event *event;
4095 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4096 if (perf_event_comm_match(event))
4097 perf_event_comm_output(event, comm_event);
4101 static void perf_event_comm_event(struct perf_comm_event *comm_event)
4103 struct perf_cpu_context *cpuctx;
4104 struct perf_event_context *ctx;
4105 char comm[TASK_COMM_LEN];
4110 memset(comm, 0, sizeof(comm));
4111 strlcpy(comm, comm_event->task->comm, sizeof(comm));
4112 size = ALIGN(strlen(comm)+1, sizeof(u64));
4114 comm_event->comm = comm;
4115 comm_event->comm_size = size;
4117 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4119 list_for_each_entry_rcu(pmu, &pmus, entry) {
4120 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4121 if (cpuctx->active_pmu != pmu)
4123 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4125 ctxn = pmu->task_ctx_nr;
4129 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4131 perf_event_comm_ctx(ctx, comm_event);
4133 put_cpu_ptr(pmu->pmu_cpu_context);
4138 void perf_event_comm(struct task_struct *task)
4140 struct perf_comm_event comm_event;
4141 struct perf_event_context *ctx;
4144 for_each_task_context_nr(ctxn) {
4145 ctx = task->perf_event_ctxp[ctxn];
4149 perf_event_enable_on_exec(ctx);
4152 if (!atomic_read(&nr_comm_events))
4155 comm_event = (struct perf_comm_event){
4161 .type = PERF_RECORD_COMM,
4170 perf_event_comm_event(&comm_event);
4177 struct perf_mmap_event {
4178 struct vm_area_struct *vma;
4180 const char *file_name;
4184 struct perf_event_header header;
4194 static void perf_event_mmap_output(struct perf_event *event,
4195 struct perf_mmap_event *mmap_event)
4197 struct perf_output_handle handle;
4198 struct perf_sample_data sample;
4199 int size = mmap_event->event_id.header.size;
4202 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4203 ret = perf_output_begin(&handle, event,
4204 mmap_event->event_id.header.size);
4208 mmap_event->event_id.pid = perf_event_pid(event, current);
4209 mmap_event->event_id.tid = perf_event_tid(event, current);
4211 perf_output_put(&handle, mmap_event->event_id);
4212 __output_copy(&handle, mmap_event->file_name,
4213 mmap_event->file_size);
4215 perf_event__output_id_sample(event, &handle, &sample);
4217 perf_output_end(&handle);
4219 mmap_event->event_id.header.size = size;
4222 static int perf_event_mmap_match(struct perf_event *event,
4223 struct perf_mmap_event *mmap_event,
4226 if (event->state < PERF_EVENT_STATE_INACTIVE)
4229 if (!event_filter_match(event))
4232 if ((!executable && event->attr.mmap_data) ||
4233 (executable && event->attr.mmap))
4239 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4240 struct perf_mmap_event *mmap_event,
4243 struct perf_event *event;
4245 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4246 if (perf_event_mmap_match(event, mmap_event, executable))
4247 perf_event_mmap_output(event, mmap_event);
4251 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4253 struct perf_cpu_context *cpuctx;
4254 struct perf_event_context *ctx;
4255 struct vm_area_struct *vma = mmap_event->vma;
4256 struct file *file = vma->vm_file;
4264 memset(tmp, 0, sizeof(tmp));
4268 * d_path works from the end of the buffer backwards, so we
4269 * need to add enough zero bytes after the string to handle
4270 * the 64bit alignment we do later.
4272 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4274 name = strncpy(tmp, "//enomem", sizeof(tmp));
4277 name = d_path(&file->f_path, buf, PATH_MAX);
4279 name = strncpy(tmp, "//toolong", sizeof(tmp));
4283 if (arch_vma_name(mmap_event->vma)) {
4284 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4290 name = strncpy(tmp, "[vdso]", sizeof(tmp));
4292 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4293 vma->vm_end >= vma->vm_mm->brk) {
4294 name = strncpy(tmp, "[heap]", sizeof(tmp));
4296 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4297 vma->vm_end >= vma->vm_mm->start_stack) {
4298 name = strncpy(tmp, "[stack]", sizeof(tmp));
4302 name = strncpy(tmp, "//anon", sizeof(tmp));
4307 size = ALIGN(strlen(name)+1, sizeof(u64));
4309 mmap_event->file_name = name;
4310 mmap_event->file_size = size;
4312 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4315 list_for_each_entry_rcu(pmu, &pmus, entry) {
4316 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4317 if (cpuctx->active_pmu != pmu)
4319 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4320 vma->vm_flags & VM_EXEC);
4322 ctxn = pmu->task_ctx_nr;
4326 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4328 perf_event_mmap_ctx(ctx, mmap_event,
4329 vma->vm_flags & VM_EXEC);
4332 put_cpu_ptr(pmu->pmu_cpu_context);
4339 void perf_event_mmap(struct vm_area_struct *vma)
4341 struct perf_mmap_event mmap_event;
4343 if (!atomic_read(&nr_mmap_events))
4346 mmap_event = (struct perf_mmap_event){
4352 .type = PERF_RECORD_MMAP,
4353 .misc = PERF_RECORD_MISC_USER,
4358 .start = vma->vm_start,
4359 .len = vma->vm_end - vma->vm_start,
4360 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
4364 perf_event_mmap_event(&mmap_event);
4368 * IRQ throttle logging
4371 static void perf_log_throttle(struct perf_event *event, int enable)
4373 struct perf_output_handle handle;
4374 struct perf_sample_data sample;
4378 struct perf_event_header header;
4382 } throttle_event = {
4384 .type = PERF_RECORD_THROTTLE,
4386 .size = sizeof(throttle_event),
4388 .time = perf_clock(),
4389 .id = primary_event_id(event),
4390 .stream_id = event->id,
4394 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4396 perf_event_header__init_id(&throttle_event.header, &sample, event);
4398 ret = perf_output_begin(&handle, event,
4399 throttle_event.header.size);
4403 perf_output_put(&handle, throttle_event);
4404 perf_event__output_id_sample(event, &handle, &sample);
4405 perf_output_end(&handle);
4409 * Generic event overflow handling, sampling.
4412 static int __perf_event_overflow(struct perf_event *event,
4413 int throttle, struct perf_sample_data *data,
4414 struct pt_regs *regs)
4416 int events = atomic_read(&event->event_limit);
4417 struct hw_perf_event *hwc = &event->hw;
4421 * Non-sampling counters might still use the PMI to fold short
4422 * hardware counters, ignore those.
4424 if (unlikely(!is_sampling_event(event)))
4427 if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
4429 hwc->interrupts = MAX_INTERRUPTS;
4430 perf_log_throttle(event, 0);
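* The throttled event is started again on the next timer tick, where
* perf_ctx_adjust_freq() sees interrupts == MAX_INTERRUPTS, logs an
* unthrottle and calls ->start() on the event.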
4436 if (event->attr.freq) {
4437 u64 now = perf_clock();
4438 s64 delta = now - hwc->freq_time_stamp;
4440 hwc->freq_time_stamp = now;
4442 if (delta > 0 && delta < 2*TICK_NSEC)
4443 perf_adjust_period(event, delta, hwc->last_period);
4447 * XXX event_limit might not quite work as expected on inherited events.
4451 event->pending_kill = POLL_IN;
4452 if (events && atomic_dec_and_test(&event->event_limit)) {
4454 event->pending_kill = POLL_HUP;
4455 event->pending_disable = 1;
4456 irq_work_queue(&event->pending);
4459 if (event->overflow_handler)
4460 event->overflow_handler(event, data, regs);
4462 perf_event_output(event, data, regs);
4464 if (event->fasync && event->pending_kill) {
4465 event->pending_wakeup = 1;
4466 irq_work_queue(&event->pending);
4472 int perf_event_overflow(struct perf_event *event,
4473 struct perf_sample_data *data,
4474 struct pt_regs *regs)
4476 return __perf_event_overflow(event, 1, data, regs);
4480 * Generic software event infrastructure
4483 struct swevent_htable {
4484 struct swevent_hlist *swevent_hlist;
4485 struct mutex hlist_mutex;
4488 /* Recursion avoidance in each contexts */
4489 int recursion[PERF_NR_CONTEXTS];
4492 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4495 * We directly increment event->count and keep a second value in
4496 * event->hw.period_left to count intervals. This period event
4497 * is kept in the range [-sample_period, 0] so that we can use the sign as a trigger.
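*
* Example (illustrative): with sample_period = 100 and period_left
* having advanced to +30 past the boundary, perf_swevent_set_period()
* below reports nr = (100 + 30) / 100 = 1 overflow and resets
* period_left to 30 - 100 = -70.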
4501 static u64 perf_swevent_set_period(struct perf_event *event)
4503 struct hw_perf_event *hwc = &event->hw;
4504 u64 period = hwc->last_period;
4508 hwc->last_period = hwc->sample_period;
4511 old = val = local64_read(&hwc->period_left);
4515 nr = div64_u64(period + val, period);
4516 offset = nr * period;
4518 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
4524 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4525 struct perf_sample_data *data,
4526 struct pt_regs *regs)
4528 struct hw_perf_event *hwc = &event->hw;
4532 overflow = perf_swevent_set_period(event);
4534 if (hwc->interrupts == MAX_INTERRUPTS)
4537 for (; overflow; overflow--) {
4538 if (__perf_event_overflow(event, throttle,
4541 * We inhibit the overflow from happening when
4542 * hwc->interrupts == MAX_INTERRUPTS.
4550 static void perf_swevent_event(struct perf_event *event, u64 nr,
4551 struct perf_sample_data *data,
4552 struct pt_regs *regs)
4554 struct hw_perf_event *hwc = &event->hw;
4556 local64_add(nr, &event->count);
4561 if (!is_sampling_event(event))
4564 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
4566 return perf_swevent_overflow(event, 1, data, regs);
4568 data->period = event->hw.last_period;
4570 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4571 return perf_swevent_overflow(event, 1, data, regs);
4573 if (local64_add_negative(nr, &hwc->period_left))
4576 perf_swevent_overflow(event, 0, data, regs);
4579 static int perf_exclude_event(struct perf_event *event,
4580 struct pt_regs *regs)
4582 if (event->hw.state & PERF_HES_STOPPED)
4586 if (event->attr.exclude_user && user_mode(regs))
4589 if (event->attr.exclude_kernel && !user_mode(regs))
4596 static int perf_swevent_match(struct perf_event *event,
4597 enum perf_type_id type,
4599 struct perf_sample_data *data,
4600 struct pt_regs *regs)
4602 if (event->attr.type != type)
4605 if (event->attr.config != event_id)
4608 if (perf_exclude_event(event, regs))
4614 static inline u64 swevent_hash(u64 type, u32 event_id)
4616 u64 val = event_id | (type << 32);
4618 return hash_64(val, SWEVENT_HLIST_BITS);
4621 static inline struct hlist_head *
4622 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4624 u64 hash = swevent_hash(type, event_id);
4626 return &hlist->heads[hash];
4629 /* For the read side: events when they trigger */
4630 static inline struct hlist_head *
4631 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
4633 struct swevent_hlist *hlist;
4635 hlist = rcu_dereference(swhash->swevent_hlist);
4639 return __find_swevent_head(hlist, type, event_id);
4642 /* For the event head insertion and removal in the hlist */
4643 static inline struct hlist_head *
4644 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
4646 struct swevent_hlist *hlist;
4647 u32 event_id = event->attr.config;
4648 u64 type = event->attr.type;
4651 * Event scheduling is always serialized against hlist allocation
4652 * and release, which makes the protected version suitable here.
4653 * The context lock guarantees that.
4655 hlist = rcu_dereference_protected(swhash->swevent_hlist,
4656 lockdep_is_held(&event->ctx->lock));
4660 return __find_swevent_head(hlist, type, event_id);
4663 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4665 struct perf_sample_data *data,
4666 struct pt_regs *regs)
4668 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4669 struct perf_event *event;
4670 struct hlist_node *node;
4671 struct hlist_head *head;
4674 head = find_swevent_head_rcu(swhash, type, event_id);
4678 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4679 if (perf_swevent_match(event, type, event_id, data, regs))
4680 perf_swevent_event(event, nr, data, regs);
4686 int perf_swevent_get_recursion_context(void)
4688 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4690 return get_recursion_context(swhash->recursion);
4692 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4694 inline void perf_swevent_put_recursion_context(int rctx)
4696 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4698 put_recursion_context(swhash->recursion, rctx);
4701 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
4703 struct perf_sample_data data;
4706 preempt_disable_notrace();
4707 rctx = perf_swevent_get_recursion_context();
4711 perf_sample_data_init(&data, addr);
4713 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
4715 perf_swevent_put_recursion_context(rctx);
4716 preempt_enable_notrace();
4719 static void perf_swevent_read(struct perf_event *event)
4723 static int perf_swevent_add(struct perf_event *event, int flags)
4725 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4726 struct hw_perf_event *hwc = &event->hw;
4727 struct hlist_head *head;
4729 if (is_sampling_event(event)) {
4730 hwc->last_period = hwc->sample_period;
4731 perf_swevent_set_period(event);
4734 hwc->state = !(flags & PERF_EF_START);
4736 head = find_swevent_head(swhash, event);
4737 if (WARN_ON_ONCE(!head))
4740 hlist_add_head_rcu(&event->hlist_entry, head);
4745 static void perf_swevent_del(struct perf_event *event, int flags)
4747 hlist_del_rcu(&event->hlist_entry);
4750 static void perf_swevent_start(struct perf_event *event, int flags)
4752 event->hw.state = 0;
4755 static void perf_swevent_stop(struct perf_event *event, int flags)
4757 event->hw.state = PERF_HES_STOPPED;
4760 /* Deref the hlist from the update side */
4761 static inline struct swevent_hlist *
4762 swevent_hlist_deref(struct swevent_htable *swhash)
4764 return rcu_dereference_protected(swhash->swevent_hlist,
4765 lockdep_is_held(&swhash->hlist_mutex));
4768 static void swevent_hlist_release(struct swevent_htable *swhash)
4770 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
4775 rcu_assign_pointer(swhash->swevent_hlist, NULL);
4776 kfree_rcu(hlist, rcu_head);
4779 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4781 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
4783 mutex_lock(&swhash->hlist_mutex);
4785 if (!--swhash->hlist_refcount)
4786 swevent_hlist_release(swhash);
4788 mutex_unlock(&swhash->hlist_mutex);
4791 static void swevent_hlist_put(struct perf_event *event)
4795 if (event->cpu != -1) {
4796 swevent_hlist_put_cpu(event, event->cpu);
4800 for_each_possible_cpu(cpu)
4801 swevent_hlist_put_cpu(event, cpu);
4804 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4806 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
4809 mutex_lock(&swhash->hlist_mutex);
4811 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
4812 struct swevent_hlist *hlist;
4814 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4819 rcu_assign_pointer(swhash->swevent_hlist, hlist);
4821 swhash->hlist_refcount++;
4823 mutex_unlock(&swhash->hlist_mutex);
4828 static int swevent_hlist_get(struct perf_event *event)
4831 int cpu, failed_cpu;
4833 if (event->cpu != -1)
4834 return swevent_hlist_get_cpu(event, event->cpu);
4837 for_each_possible_cpu(cpu) {
4838 err = swevent_hlist_get_cpu(event, cpu);
4848 for_each_possible_cpu(cpu) {
4849 if (cpu == failed_cpu)
4851 swevent_hlist_put_cpu(event, cpu);
4858 struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
4860 static void sw_perf_event_destroy(struct perf_event *event)
4862 u64 event_id = event->attr.config;
4864 WARN_ON(event->parent);
4866 jump_label_dec(&perf_swevent_enabled[event_id]);
4867 swevent_hlist_put(event);
4870 static int perf_swevent_init(struct perf_event *event)
4872 int event_id = event->attr.config;
4874 if (event->attr.type != PERF_TYPE_SOFTWARE)
4878 case PERF_COUNT_SW_CPU_CLOCK:
4879 case PERF_COUNT_SW_TASK_CLOCK:
4886 if (event_id >= PERF_COUNT_SW_MAX)
4889 if (!event->parent) {
4892 err = swevent_hlist_get(event);
4896 jump_label_inc(&perf_swevent_enabled[event_id]);
4897 event->destroy = sw_perf_event_destroy;
4903 static struct pmu perf_swevent = {
4904 .task_ctx_nr = perf_sw_context,
4906 .event_init = perf_swevent_init,
4907 .add = perf_swevent_add,
4908 .del = perf_swevent_del,
4909 .start = perf_swevent_start,
4910 .stop = perf_swevent_stop,
4911 .read = perf_swevent_read,
4914 #ifdef CONFIG_EVENT_TRACING
4916 static int perf_tp_filter_match(struct perf_event *event,
4917 struct perf_sample_data *data)
4919 void *record = data->raw->data;
4921 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4926 static int perf_tp_event_match(struct perf_event *event,
4927 struct perf_sample_data *data,
4928 struct pt_regs *regs)
4930 if (event->hw.state & PERF_HES_STOPPED)
4933 * All tracepoints are from kernel-space.
4935 if (event->attr.exclude_kernel)
4938 if (!perf_tp_filter_match(event, data))
4944 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
4945 struct pt_regs *regs, struct hlist_head *head, int rctx)
4947 struct perf_sample_data data;
4948 struct perf_event *event;
4949 struct hlist_node *node;
4951 struct perf_raw_record raw = {
4956 perf_sample_data_init(&data, addr);
4959 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4960 if (perf_tp_event_match(event, &data, regs))
4961 perf_swevent_event(event, count, &data, regs);
4964 perf_swevent_put_recursion_context(rctx);
4966 EXPORT_SYMBOL_GPL(perf_tp_event);
4968 static void tp_perf_event_destroy(struct perf_event *event)
4970 perf_trace_destroy(event);
4973 static int perf_tp_event_init(struct perf_event *event)
4977 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4980 err = perf_trace_init(event);
4984 event->destroy = tp_perf_event_destroy;
4989 static struct pmu perf_tracepoint = {
4990 .task_ctx_nr = perf_sw_context,
4992 .event_init = perf_tp_event_init,
4993 .add = perf_trace_add,
4994 .del = perf_trace_del,
4995 .start = perf_swevent_start,
4996 .stop = perf_swevent_stop,
4997 .read = perf_swevent_read,
5000 static inline void perf_tp_register(void)
5002 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5005 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5010 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5013 filter_str = strndup_user(arg, PAGE_SIZE);
5014 if (IS_ERR(filter_str))
5015 return PTR_ERR(filter_str);
5017 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5023 static void perf_event_free_filter(struct perf_event *event)
5025 ftrace_profile_free_filter(event);
5030 static inline void perf_tp_register(void)
5034 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5039 static void perf_event_free_filter(struct perf_event *event)
5043 #endif /* CONFIG_EVENT_TRACING */
5045 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5046 void perf_bp_event(struct perf_event *bp, void *data)
5048 struct perf_sample_data sample;
5049 struct pt_regs *regs = data;
5051 perf_sample_data_init(&sample, bp->attr.bp_addr);
5053 if (!bp->hw.state && !perf_exclude_event(bp, regs))
5054 perf_swevent_event(bp, 1, &sample, regs);
5059 * hrtimer based swevent callback
5062 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5064 enum hrtimer_restart ret = HRTIMER_RESTART;
5065 struct perf_sample_data data;
5066 struct pt_regs *regs;
5067 struct perf_event *event;
5070 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5072 if (event->state != PERF_EVENT_STATE_ACTIVE)
5073 return HRTIMER_NORESTART;
5075 event->pmu->read(event);
5077 perf_sample_data_init(&data, 0);
5078 data.period = event->hw.last_period;
5079 regs = get_irq_regs();
5081 if (regs && !perf_exclude_event(event, regs)) {
5082 if (!(event->attr.exclude_idle && current->pid == 0))
5083 if (perf_event_overflow(event, &data, regs))
5084 ret = HRTIMER_NORESTART;
5087 period = max_t(u64, 10000, event->hw.sample_period);
5088 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
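* The max_t() above clamps the re-arm period to at least 10,000ns
* (10us), i.e. at most roughly 100,000 timer callbacks per second.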
5093 static void perf_swevent_start_hrtimer(struct perf_event *event)
5095 struct hw_perf_event *hwc = &event->hw;
5098 if (!is_sampling_event(event))
5101 period = local64_read(&hwc->period_left);
5106 local64_set(&hwc->period_left, 0);
5108 period = max_t(u64, 10000, hwc->sample_period);
5110 __hrtimer_start_range_ns(&hwc->hrtimer,
5111 ns_to_ktime(period), 0,
5112 HRTIMER_MODE_REL_PINNED, 0);
5115 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5117 struct hw_perf_event *hwc = &event->hw;
5119 if (is_sampling_event(event)) {
5120 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
5121 local64_set(&hwc->period_left, ktime_to_ns(remaining));
5123 hrtimer_cancel(&hwc->hrtimer);
5127 static void perf_swevent_init_hrtimer(struct perf_event *event)
5129 struct hw_perf_event *hwc = &event->hw;
5131 if (!is_sampling_event(event))
5134 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5135 hwc->hrtimer.function = perf_swevent_hrtimer;
5138 * Since hrtimers have a fixed rate, we can do a static freq->period
5139 * mapping and avoid the whole period adjust feedback stuff.
5141 if (event->attr.freq) {
5142 long freq = event->attr.sample_freq;
5144 event->attr.sample_period = NSEC_PER_SEC / freq;
5145 hwc->sample_period = event->attr.sample_period;
5146 local64_set(&hwc->period_left, hwc->sample_period);
5147 event->attr.freq = 0;
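/*
 * Worked example of the static mapping above (numbers illustrative only):
 * with attr.freq set and attr.sample_freq = 4000 (4 kHz), the event gets
 * attr.sample_period = NSEC_PER_SEC / 4000 = 250000 ns, i.e. the hrtimer
 * fires every 250 us, and attr.freq is then cleared so no runtime period
 * adjustment ever happens for hrtimer-based software events.
 */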
5152 * Software event: cpu wall time clock
5155 static void cpu_clock_event_update(struct perf_event *event)
5160 now = local_clock();
5161 prev = local64_xchg(&event->hw.prev_count, now);
5162 local64_add(now - prev, &event->count);
5165 static void cpu_clock_event_start(struct perf_event *event, int flags)
5167 local64_set(&event->hw.prev_count, local_clock());
5168 perf_swevent_start_hrtimer(event);
5171 static void cpu_clock_event_stop(struct perf_event *event, int flags)
5173 perf_swevent_cancel_hrtimer(event);
5174 cpu_clock_event_update(event);
5177 static int cpu_clock_event_add(struct perf_event *event, int flags)
5179 if (flags & PERF_EF_START)
5180 cpu_clock_event_start(event, flags);
5185 static void cpu_clock_event_del(struct perf_event *event, int flags)
5187 cpu_clock_event_stop(event, flags);
5190 static void cpu_clock_event_read(struct perf_event *event)
5192 cpu_clock_event_update(event);
5195 static int cpu_clock_event_init(struct perf_event *event)
5197 if (event->attr.type != PERF_TYPE_SOFTWARE)
5200 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5203 perf_swevent_init_hrtimer(event);
5208 static struct pmu perf_cpu_clock = {
5209 .task_ctx_nr = perf_sw_context,
5211 .event_init = cpu_clock_event_init,
5212 .add = cpu_clock_event_add,
5213 .del = cpu_clock_event_del,
5214 .start = cpu_clock_event_start,
5215 .stop = cpu_clock_event_stop,
5216 .read = cpu_clock_event_read,
5220 * Software event: task time clock
5223 static void task_clock_event_update(struct perf_event *event, u64 now)
5228 prev = local64_xchg(&event->hw.prev_count, now);
5230 local64_add(delta, &event->count);
5233 static void task_clock_event_start(struct perf_event *event, int flags)
5235 local64_set(&event->hw.prev_count, event->ctx->time);
5236 perf_swevent_start_hrtimer(event);
5239 static void task_clock_event_stop(struct perf_event *event, int flags)
5241 perf_swevent_cancel_hrtimer(event);
5242 task_clock_event_update(event, event->ctx->time);
5245 static int task_clock_event_add(struct perf_event *event, int flags)
5247 if (flags & PERF_EF_START)
5248 task_clock_event_start(event, flags);
5253 static void task_clock_event_del(struct perf_event *event, int flags)
5255 task_clock_event_stop(event, PERF_EF_UPDATE);
5258 static void task_clock_event_read(struct perf_event *event)
5260 u64 now = perf_clock();
5261 u64 delta = now - event->ctx->timestamp;
5262 u64 time = event->ctx->time + delta;
5264 task_clock_event_update(event, time);
5267 static int task_clock_event_init(struct perf_event *event)
5269 if (event->attr.type != PERF_TYPE_SOFTWARE)
5272 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5275 perf_swevent_init_hrtimer(event);
5280 static struct pmu perf_task_clock = {
5281 .task_ctx_nr = perf_sw_context,
5283 .event_init = task_clock_event_init,
5284 .add = task_clock_event_add,
5285 .del = task_clock_event_del,
5286 .start = task_clock_event_start,
5287 .stop = task_clock_event_stop,
5288 .read = task_clock_event_read,
5291 static void perf_pmu_nop_void(struct pmu *pmu)
5295 static int perf_pmu_nop_int(struct pmu *pmu)
5300 static void perf_pmu_start_txn(struct pmu *pmu)
5302 perf_pmu_disable(pmu);
5305 static int perf_pmu_commit_txn(struct pmu *pmu)
5307 perf_pmu_enable(pmu);
5311 static void perf_pmu_cancel_txn(struct pmu *pmu)
5313 perf_pmu_enable(pmu);
5317 * Ensures all contexts with the same task_ctx_nr have the same
5318 * pmu_cpu_context too.
5320 static void *find_pmu_context(int ctxn)
5327 list_for_each_entry(pmu, &pmus, entry) {
5328 if (pmu->task_ctx_nr == ctxn)
5329 return pmu->pmu_cpu_context;
5335 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5339 for_each_possible_cpu(cpu) {
5340 struct perf_cpu_context *cpuctx;
5342 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5344 if (cpuctx->active_pmu == old_pmu)
5345 cpuctx->active_pmu = pmu;
5349 static void free_pmu_context(struct pmu *pmu)
5353 mutex_lock(&pmus_lock);
5355 * A crude refcount: if another pmu still shares this cpu context, hand it over instead of freeing it.
5357 list_for_each_entry(i, &pmus, entry) {
5358 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5359 update_pmu_context(i, pmu);
5364 free_percpu(pmu->pmu_cpu_context);
5366 mutex_unlock(&pmus_lock);
5368 static struct idr pmu_idr;
5371 type_show(struct device *dev, struct device_attribute *attr, char *page)
5373 struct pmu *pmu = dev_get_drvdata(dev);
5375 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
5378 static struct device_attribute pmu_dev_attrs[] = {
5383 static int pmu_bus_running;
5384 static struct bus_type pmu_bus = {
5385 .name = "event_source",
5386 .dev_attrs = pmu_dev_attrs,
5389 static void pmu_dev_release(struct device *dev)
5394 static int pmu_dev_alloc(struct pmu *pmu)
5398 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
5402 device_initialize(pmu->dev);
5403 ret = dev_set_name(pmu->dev, "%s", pmu->name);
5407 dev_set_drvdata(pmu->dev, pmu);
5408 pmu->dev->bus = &pmu_bus;
5409 pmu->dev->release = pmu_dev_release;
5410 ret = device_add(pmu->dev);
5418 put_device(pmu->dev);
5422 static struct lock_class_key cpuctx_mutex;
5423 static struct lock_class_key cpuctx_lock;
5425 int perf_pmu_register(struct pmu *pmu, char *name, int type)
5429 mutex_lock(&pmus_lock);
5431 pmu->pmu_disable_count = alloc_percpu(int);
5432 if (!pmu->pmu_disable_count)
5441 int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
5445 err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
5453 if (pmu_bus_running) {
5454 ret = pmu_dev_alloc(pmu);
5460 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5461 if (pmu->pmu_cpu_context)
5462 goto got_cpu_context;
5464 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5465 if (!pmu->pmu_cpu_context)
5468 for_each_possible_cpu(cpu) {
5469 struct perf_cpu_context *cpuctx;
5471 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5472 __perf_event_init_context(&cpuctx->ctx);
5473 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
5474 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
5475 cpuctx->ctx.type = cpu_context;
5476 cpuctx->ctx.pmu = pmu;
5477 cpuctx->jiffies_interval = 1;
5478 INIT_LIST_HEAD(&cpuctx->rotation_list);
5479 cpuctx->active_pmu = pmu;
5483 if (!pmu->start_txn) {
5484 if (pmu->pmu_enable) {
5486 * If we have pmu_enable/pmu_disable calls, install
5487 * transaction stubs that use that to try and batch
5488 * hardware accesses.
5490 pmu->start_txn = perf_pmu_start_txn;
5491 pmu->commit_txn = perf_pmu_commit_txn;
5492 pmu->cancel_txn = perf_pmu_cancel_txn;
5494 pmu->start_txn = perf_pmu_nop_void;
5495 pmu->commit_txn = perf_pmu_nop_int;
5496 pmu->cancel_txn = perf_pmu_nop_void;
5500 if (!pmu->pmu_enable) {
5501 pmu->pmu_enable = perf_pmu_nop_void;
5502 pmu->pmu_disable = perf_pmu_nop_void;
5505 list_add_rcu(&pmu->entry, &pmus);
5508 mutex_unlock(&pmus_lock);
5513 device_del(pmu->dev);
5514 put_device(pmu->dev);
5517 if (pmu->type >= PERF_TYPE_MAX)
5518 idr_remove(&pmu_idr, pmu->type);
5521 free_percpu(pmu->pmu_disable_count);
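/*
 * Registration sketch (illustrative, not part of the original file): a
 * minimal software-style pmu only has to fill in the scheduling callbacks
 * before calling perf_pmu_register().  All names below are hypothetical;
 * passing a negative type asks for a dynamically allocated pmu->type via
 * the idr above.
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * Any transaction or pmu_enable/pmu_disable hooks left unset are filled in
 * with the nop/txn stubs above.
 */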
5525 void perf_pmu_unregister(struct pmu *pmu)
5527 mutex_lock(&pmus_lock);
5528 list_del_rcu(&pmu->entry);
5529 mutex_unlock(&pmus_lock);
5532 * We dereference the pmu list under both SRCU and regular RCU, so
5533 * synchronize against both of those.
5535 synchronize_srcu(&pmus_srcu);
5538 free_percpu(pmu->pmu_disable_count);
5539 if (pmu->type >= PERF_TYPE_MAX)
5540 idr_remove(&pmu_idr, pmu->type);
5541 device_del(pmu->dev);
5542 put_device(pmu->dev);
5543 free_pmu_context(pmu);
5546 struct pmu *perf_init_event(struct perf_event *event)
5548 struct pmu *pmu = NULL;
5552 idx = srcu_read_lock(&pmus_srcu);
5555 pmu = idr_find(&pmu_idr, event->attr.type);
5559 ret = pmu->event_init(event);
5565 list_for_each_entry_rcu(pmu, &pmus, entry) {
5567 ret = pmu->event_init(event);
5571 if (ret != -ENOENT) {
5576 pmu = ERR_PTR(-ENOENT);
5578 srcu_read_unlock(&pmus_srcu, idx);
5584 * Allocate and initialize an event structure
5586 static struct perf_event *
5587 perf_event_alloc(struct perf_event_attr *attr, int cpu,
5588 struct task_struct *task,
5589 struct perf_event *group_leader,
5590 struct perf_event *parent_event,
5591 perf_overflow_handler_t overflow_handler,
5595 struct perf_event *event;
5596 struct hw_perf_event *hwc;
5599 if ((unsigned)cpu >= nr_cpu_ids) {
5600 if (!task || cpu != -1)
5601 return ERR_PTR(-EINVAL);
5604 event = kzalloc(sizeof(*event), GFP_KERNEL);
5606 return ERR_PTR(-ENOMEM);
5609 * Single events are their own group leaders, with an
5610 * empty sibling list:
5613 group_leader = event;
5615 mutex_init(&event->child_mutex);
5616 INIT_LIST_HEAD(&event->child_list);
5618 INIT_LIST_HEAD(&event->group_entry);
5619 INIT_LIST_HEAD(&event->event_entry);
5620 INIT_LIST_HEAD(&event->sibling_list);
5621 init_waitqueue_head(&event->waitq);
5622 init_irq_work(&event->pending, perf_pending_event);
5624 mutex_init(&event->mmap_mutex);
5627 event->attr = *attr;
5628 event->group_leader = group_leader;
5632 event->parent = parent_event;
5634 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5635 event->id = atomic64_inc_return(&perf_event_id);
5637 event->state = PERF_EVENT_STATE_INACTIVE;
5640 event->attach_state = PERF_ATTACH_TASK;
5641 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5643 * hw_breakpoint is a bit difficult here..
5645 if (attr->type == PERF_TYPE_BREAKPOINT)
5646 event->hw.bp_target = task;
5650 if (!overflow_handler && parent_event) {
5651 overflow_handler = parent_event->overflow_handler;
5652 context = parent_event->overflow_handler_context;
5655 event->overflow_handler = overflow_handler;
5656 event->overflow_handler_context = context;
5659 event->state = PERF_EVENT_STATE_OFF;
5664 hwc->sample_period = attr->sample_period;
5665 if (attr->freq && attr->sample_freq)
5666 hwc->sample_period = 1;
5667 hwc->last_period = hwc->sample_period;
5669 local64_set(&hwc->period_left, hwc->sample_period);
5672 * we currently do not support PERF_FORMAT_GROUP on inherited events
5674 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5677 pmu = perf_init_event(event);
5683 else if (IS_ERR(pmu))
5688 put_pid_ns(event->ns);
5690 return ERR_PTR(err);
5693 if (!event->parent) {
5694 if (event->attach_state & PERF_ATTACH_TASK)
5695 jump_label_inc(&perf_sched_events);
5696 if (event->attr.mmap || event->attr.mmap_data)
5697 atomic_inc(&nr_mmap_events);
5698 if (event->attr.comm)
5699 atomic_inc(&nr_comm_events);
5700 if (event->attr.task)
5701 atomic_inc(&nr_task_events);
5702 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5703 err = get_callchain_buffers();
5706 return ERR_PTR(err);
5714 static int perf_copy_attr(struct perf_event_attr __user *uattr,
5715 struct perf_event_attr *attr)
5720 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5724 * Zero the full structure, so that a short copy leaves the rest zeroed.
5726 memset(attr, 0, sizeof(*attr));
5728 ret = get_user(size, &uattr->size);
5732 if (size > PAGE_SIZE) /* silly large */
5735 if (!size) /* abi compat */
5736 size = PERF_ATTR_SIZE_VER0;
5738 if (size < PERF_ATTR_SIZE_VER0)
5742 * If we're handed a bigger struct than we know of,
5743 * ensure all the unknown bits are 0 - i.e. new
5744 * user-space does not rely on any kernel feature
5745 * extensions we don't know about yet.
5747 if (size > sizeof(*attr)) {
5748 unsigned char __user *addr;
5749 unsigned char __user *end;
5752 addr = (void __user *)uattr + sizeof(*attr);
5753 end = (void __user *)uattr + size;
5755 for (; addr < end; addr++) {
5756 ret = get_user(val, addr);
5762 size = sizeof(*attr);
5765 ret = copy_from_user(attr, uattr, size);
5769 if (attr->__reserved_1)
5772 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5775 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5782 put_user(sizeof(*attr), &uattr->size);
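/*
 * ABI-compat sketch (illustrative): userspace always sets attr.size, and the
 * copy logic above accepts both older (smaller) and newer (larger) layouts,
 * provided any trailing bytes the kernel does not know about are zero.  A
 * typical caller therefore does:
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *
 * A non-zero unknown tail is rejected (typically -E2BIG), with the size the
 * kernel does understand written back to uattr->size first.
 */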
5788 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
5790 struct ring_buffer *rb = NULL, *old_rb = NULL;
5796 /* don't allow circular references */
5797 if (event == output_event)
5801 * Don't allow cross-cpu buffers
5803 if (output_event->cpu != event->cpu)
5807 * If it's not a per-cpu rb, it must be the same task.
5809 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5813 mutex_lock(&event->mmap_mutex);
5814 /* Can't redirect output if we've got an active mmap() */
5815 if (atomic_read(&event->mmap_count))
5819 /* get the rb we want to redirect to */
5820 rb = ring_buffer_get(output_event);
5826 rcu_assign_pointer(event->rb, rb);
5829 mutex_unlock(&event->mmap_mutex);
5832 ring_buffer_put(old_rb);
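/*
 * Usage sketch (illustrative): userspace reaches this path through the
 * PERF_EVENT_IOC_SET_OUTPUT ioctl, redirecting one event's samples into
 * another event's ring buffer so only one mmap() is needed.  The fds below
 * are hypothetical; both events must be bound to the same cpu (or, for
 * per-task events, to the same task context), and the redirected event must
 * not currently be mmap()ed itself.
 *
 *	ioctl(aux_fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd);
 */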
5838 * sys_perf_event_open - open a performance event, associate it with a task/cpu
5840 * @attr_uptr: event_id type attributes for monitoring/sampling
5843 * @group_fd: group leader event fd
5845 SYSCALL_DEFINE5(perf_event_open,
5846 struct perf_event_attr __user *, attr_uptr,
5847 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5849 struct perf_event *group_leader = NULL, *output_event = NULL;
5850 struct perf_event *event, *sibling;
5851 struct perf_event_attr attr;
5852 struct perf_event_context *ctx;
5853 struct file *event_file = NULL;
5854 struct file *group_file = NULL;
5855 struct task_struct *task = NULL;
5859 int fput_needed = 0;
5862 /* for future expandability... */
5863 if (flags & ~PERF_FLAG_ALL)
5866 err = perf_copy_attr(attr_uptr, &attr);
5870 if (!attr.exclude_kernel) {
5871 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
5876 if (attr.sample_freq > sysctl_perf_event_sample_rate)
5881 * In cgroup mode, the pid argument is used to pass the fd
5882 * opened to the cgroup directory in cgroupfs. The cpu argument
5883 * designates the cpu on which to monitor threads from that
5886 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
5889 event_fd = get_unused_fd_flags(O_RDWR);
5893 if (group_fd != -1) {
5894 group_leader = perf_fget_light(group_fd, &fput_needed);
5895 if (IS_ERR(group_leader)) {
5896 err = PTR_ERR(group_leader);
5899 group_file = group_leader->filp;
5900 if (flags & PERF_FLAG_FD_OUTPUT)
5901 output_event = group_leader;
5902 if (flags & PERF_FLAG_FD_NO_GROUP)
5903 group_leader = NULL;
5906 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
5907 task = find_lively_task_by_vpid(pid);
5909 err = PTR_ERR(task);
5914 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
5916 if (IS_ERR(event)) {
5917 err = PTR_ERR(event);
5921 if (flags & PERF_FLAG_PID_CGROUP) {
5922 err = perf_cgroup_connect(pid, event, &attr, group_leader);
5927 * - that has cgroup constraint on event->cpu
5928 * - that may need work on context switch
5930 atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
5931 jump_label_inc(&perf_sched_events);
5935 * Special case software events and allow them to be part of
5936 * any hardware group.
5941 (is_software_event(event) != is_software_event(group_leader))) {
5942 if (is_software_event(event)) {
5944 * If event and group_leader are not both a software
5945 * event, and event is, then group leader is not.
5947 * Allow the addition of software events to !software
5948 * groups; this is safe because software events never fail to schedule.
5951 pmu = group_leader->pmu;
5952 } else if (is_software_event(group_leader) &&
5953 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
5955 * In case the group is a pure software group, and we
5956 * try to add a hardware event, move the whole group to
5957 * the hardware context.
5964 * Get the target context (task or percpu):
5966 ctx = find_get_context(pmu, task, cpu);
5973 put_task_struct(task);
5978 * Look up the group leader (we will attach this event to it):
5984 * Do not allow a recursive hierarchy (this new sibling
5985 * becoming part of another group-sibling):
5987 if (group_leader->group_leader != group_leader)
5990 * Do not allow attaching to a group in a different
5991 * task or CPU context:
5994 if (group_leader->ctx->type != ctx->type)
5997 if (group_leader->ctx != ctx)
6002 * Only a group leader can be exclusive or pinned
6004 if (attr.exclusive || attr.pinned)
6009 err = perf_event_set_output(event, output_event);
6014 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6015 if (IS_ERR(event_file)) {
6016 err = PTR_ERR(event_file);
6021 struct perf_event_context *gctx = group_leader->ctx;
6023 mutex_lock(&gctx->mutex);
6024 perf_remove_from_context(group_leader);
6025 list_for_each_entry(sibling, &group_leader->sibling_list,
6027 perf_remove_from_context(sibling);
6030 mutex_unlock(&gctx->mutex);
6034 event->filp = event_file;
6035 WARN_ON_ONCE(ctx->parent_ctx);
6036 mutex_lock(&ctx->mutex);
6039 perf_install_in_context(ctx, group_leader, cpu);
6041 list_for_each_entry(sibling, &group_leader->sibling_list,
6043 perf_install_in_context(ctx, sibling, cpu);
6048 perf_install_in_context(ctx, event, cpu);
6050 perf_unpin_context(ctx);
6051 mutex_unlock(&ctx->mutex);
6053 event->owner = current;
6055 mutex_lock(&current->perf_event_mutex);
6056 list_add_tail(&event->owner_entry, &current->perf_event_list);
6057 mutex_unlock(&current->perf_event_mutex);
6060 * Precalculate sample_data sizes
6062 perf_event__header_size(event);
6063 perf_event__id_header_size(event);
6066 * Drop the reference on the group_event after placing the
6067 * new event on the sibling_list. This ensures destruction
6068 * of the group leader will find the pointer to itself in
6069 * perf_group_detach().
6071 fput_light(group_file, fput_needed);
6072 fd_install(event_fd, event_file);
6076 perf_unpin_context(ctx);
6082 put_task_struct(task);
6084 fput_light(group_file, fput_needed);
6086 put_unused_fd(event_fd);
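/*
 * Userspace sketch (illustrative, not part of the original file): there is
 * no libc wrapper for this syscall, so callers go through syscall(2).  This
 * example counts the calling task's task-clock time; error handling omitted.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *	attr.type = PERF_TYPE_SOFTWARE;
 *	attr.config = PERF_COUNT_SW_TASK_CLOCK;
 *	attr.disabled = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	... run the workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */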
6091 * perf_event_create_kernel_counter
6093 * @attr: attributes of the counter to create
6094 * @cpu: cpu to which the counter is bound
6095 * @task: task to profile (NULL for percpu)
6098 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
6099 struct task_struct *task,
6100 perf_overflow_handler_t overflow_handler,
6103 struct perf_event_context *ctx;
6104 struct perf_event *event;
6108 * Get the target context (task or percpu):
6111 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
6112 overflow_handler, context);
6113 if (IS_ERR(event)) {
6114 err = PTR_ERR(event);
6118 ctx = find_get_context(event->pmu, task, cpu);
6125 WARN_ON_ONCE(ctx->parent_ctx);
6126 mutex_lock(&ctx->mutex);
6127 perf_install_in_context(ctx, event, cpu);
6129 perf_unpin_context(ctx);
6130 mutex_unlock(&ctx->mutex);
6137 return ERR_PTR(err);
6139 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
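/*
 * In-kernel usage sketch (illustrative, not part of the original file):
 * kernel users such as hw_breakpoint or watchdog-style code create counters
 * directly, bypassing the fd-based interface.  All names below are
 * hypothetical, and "cpu" stands for the target cpu.
 *
 *	static void my_overflow(struct perf_event *event,
 *				struct perf_sample_data *data,
 *				struct pt_regs *regs)
 *	{
 *		...react to the counter overflowing...
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.sample_period	= 1000000,
 *		.pinned		= 1,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */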
6141 static void sync_child_event(struct perf_event *child_event,
6142 struct task_struct *child)
6144 struct perf_event *parent_event = child_event->parent;
6147 if (child_event->attr.inherit_stat)
6148 perf_event_read_event(child_event, child);
6150 child_val = perf_event_count(child_event);
6153 * Add back the child's count to the parent's count:
6155 atomic64_add(child_val, &parent_event->child_count);
6156 atomic64_add(child_event->total_time_enabled,
6157 &parent_event->child_total_time_enabled);
6158 atomic64_add(child_event->total_time_running,
6159 &parent_event->child_total_time_running);
6162 * Remove this event from the parent's list
6164 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6165 mutex_lock(&parent_event->child_mutex);
6166 list_del_init(&child_event->child_list);
6167 mutex_unlock(&parent_event->child_mutex);
6170 * Release the parent event, if this was the last
6173 fput(parent_event->filp);
6177 __perf_event_exit_task(struct perf_event *child_event,
6178 struct perf_event_context *child_ctx,
6179 struct task_struct *child)
6181 if (child_event->parent) {
6182 raw_spin_lock_irq(&child_ctx->lock);
6183 perf_group_detach(child_event);
6184 raw_spin_unlock_irq(&child_ctx->lock);
6187 perf_remove_from_context(child_event);
6190 * It can happen that the parent exits first, and has events
6191 * that are still around due to the child reference. These
6192 * events need to be zapped.
6194 if (child_event->parent) {
6195 sync_child_event(child_event, child);
6196 free_event(child_event);
6200 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
6202 struct perf_event *child_event, *tmp;
6203 struct perf_event_context *child_ctx;
6204 unsigned long flags;
6206 if (likely(!child->perf_event_ctxp[ctxn])) {
6207 perf_event_task(child, NULL, 0);
6211 local_irq_save(flags);
6213 * We can't reschedule here because interrupts are disabled,
6214 * and either child is current or it is a task that can't be
6215 * scheduled, so we are now safe from rescheduling changing our context.
6218 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
6221 * Take the context lock here so that if find_get_context is
6222 * reading child->perf_event_ctxp, we wait until it has
6223 * incremented the context's refcount before we do put_ctx below.
6225 raw_spin_lock(&child_ctx->lock);
6226 task_ctx_sched_out(child_ctx);
6227 child->perf_event_ctxp[ctxn] = NULL;
6229 * If this context is a clone, unclone it so it can't get
6230 * swapped to another process while we're removing all
6231 * the events from it.
6233 unclone_ctx(child_ctx);
6234 update_context_time(child_ctx);
6235 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6238 * Report the task dead after unscheduling the events so that we
6239 * won't get any samples after PERF_RECORD_EXIT. We can however still
6240 * get a few PERF_RECORD_READ events.
6242 perf_event_task(child, child_ctx, 0);
6245 * We can recurse on the same lock type through:
6247 * __perf_event_exit_task()
6248 * sync_child_event()
6249 * fput(parent_event->filp)
6251 * mutex_lock(&ctx->mutex)
6253 * But since it's the parent context it won't be the same instance.
6255 mutex_lock(&child_ctx->mutex);
6258 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
6260 __perf_event_exit_task(child_event, child_ctx, child);
6262 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
6264 __perf_event_exit_task(child_event, child_ctx, child);
6267 * If the last event was a group event, it will have appended all
6268 * its siblings to the list, but we obtained 'tmp' before that which
6269 * will still point to the list head terminating the iteration.
6271 if (!list_empty(&child_ctx->pinned_groups) ||
6272 !list_empty(&child_ctx->flexible_groups))
6275 mutex_unlock(&child_ctx->mutex);
6281 * When a child task exits, feed back event values to parent events.
6283 void perf_event_exit_task(struct task_struct *child)
6285 struct perf_event *event, *tmp;
6288 mutex_lock(&child->perf_event_mutex);
6289 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
6291 list_del_init(&event->owner_entry);
6294 * Ensure the list deletion is visible before we clear
6295 * the owner; this closes a race against perf_release(), where
6296 * we need to serialize on the owner->perf_event_mutex.
6299 event->owner = NULL;
6301 mutex_unlock(&child->perf_event_mutex);
6303 for_each_task_context_nr(ctxn)
6304 perf_event_exit_task_context(child, ctxn);
6307 static void perf_free_event(struct perf_event *event,
6308 struct perf_event_context *ctx)
6310 struct perf_event *parent = event->parent;
6312 if (WARN_ON_ONCE(!parent))
6315 mutex_lock(&parent->child_mutex);
6316 list_del_init(&event->child_list);
6317 mutex_unlock(&parent->child_mutex);
6321 perf_group_detach(event);
6322 list_del_event(event, ctx);
6327 * Free an unexposed, unused context, as created by inheritance by
6328 * perf_event_init_task below; used by fork() in case of failure.
6330 void perf_event_free_task(struct task_struct *task)
6332 struct perf_event_context *ctx;
6333 struct perf_event *event, *tmp;
6336 for_each_task_context_nr(ctxn) {
6337 ctx = task->perf_event_ctxp[ctxn];
6341 mutex_lock(&ctx->mutex);
6343 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
6345 perf_free_event(event, ctx);
6347 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
6349 perf_free_event(event, ctx);
6351 if (!list_empty(&ctx->pinned_groups) ||
6352 !list_empty(&ctx->flexible_groups))
6355 mutex_unlock(&ctx->mutex);
6361 void perf_event_delayed_put(struct task_struct *task)
6365 for_each_task_context_nr(ctxn)
6366 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6370 * inherit an event from parent task to child task:
6372 static struct perf_event *
6373 inherit_event(struct perf_event *parent_event,
6374 struct task_struct *parent,
6375 struct perf_event_context *parent_ctx,
6376 struct task_struct *child,
6377 struct perf_event *group_leader,
6378 struct perf_event_context *child_ctx)
6380 struct perf_event *child_event;
6381 unsigned long flags;
6384 * Instead of creating recursive hierarchies of events,
6385 * we link inherited events back to the original parent,
6386 * which has a filp for sure, and which we use as the reference count.
6389 if (parent_event->parent)
6390 parent_event = parent_event->parent;
6392 child_event = perf_event_alloc(&parent_event->attr,
6395 group_leader, parent_event,
6397 if (IS_ERR(child_event))
6402 * Make the child state follow the state of the parent event,
6403 * not its attr.disabled bit. We hold the parent's mutex,
6404 * so we won't race with perf_event_{en, dis}able_family.
6406 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6407 child_event->state = PERF_EVENT_STATE_INACTIVE;
6409 child_event->state = PERF_EVENT_STATE_OFF;
6411 if (parent_event->attr.freq) {
6412 u64 sample_period = parent_event->hw.sample_period;
6413 struct hw_perf_event *hwc = &child_event->hw;
6415 hwc->sample_period = sample_period;
6416 hwc->last_period = sample_period;
6418 local64_set(&hwc->period_left, sample_period);
6421 child_event->ctx = child_ctx;
6422 child_event->overflow_handler = parent_event->overflow_handler;
6423 child_event->overflow_handler_context
6424 = parent_event->overflow_handler_context;
6427 * Precalculate sample_data sizes
6429 perf_event__header_size(child_event);
6430 perf_event__id_header_size(child_event);
6433 * Link it up in the child's context:
6435 raw_spin_lock_irqsave(&child_ctx->lock, flags);
6436 add_event_to_ctx(child_event, child_ctx);
6437 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6440 * Get a reference to the parent filp - we will fput it
6441 * when the child event exits. This is safe to do because
6442 * we are in the parent and we know that the filp still
6443 * exists and has a nonzero count:
6445 atomic_long_inc(&parent_event->filp->f_count);
6448 * Link this into the parent event's child list
6450 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6451 mutex_lock(&parent_event->child_mutex);
6452 list_add_tail(&child_event->child_list, &parent_event->child_list);
6453 mutex_unlock(&parent_event->child_mutex);
6458 static int inherit_group(struct perf_event *parent_event,
6459 struct task_struct *parent,
6460 struct perf_event_context *parent_ctx,
6461 struct task_struct *child,
6462 struct perf_event_context *child_ctx)
6464 struct perf_event *leader;
6465 struct perf_event *sub;
6466 struct perf_event *child_ctr;
6468 leader = inherit_event(parent_event, parent, parent_ctx,
6469 child, NULL, child_ctx);
6471 return PTR_ERR(leader);
6472 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6473 child_ctr = inherit_event(sub, parent, parent_ctx,
6474 child, leader, child_ctx);
6475 if (IS_ERR(child_ctr))
6476 return PTR_ERR(child_ctr);
6482 inherit_task_group(struct perf_event *event, struct task_struct *parent,
6483 struct perf_event_context *parent_ctx,
6484 struct task_struct *child, int ctxn,
6488 struct perf_event_context *child_ctx;
6490 if (!event->attr.inherit) {
6495 child_ctx = child->perf_event_ctxp[ctxn];
6498 * This is executed from the parent task context, so
6499 * inherit events that have been marked for cloning.
6500 * First allocate and initialize a context for the child.
6504 child_ctx = alloc_perf_context(event->pmu, child);
6508 child->perf_event_ctxp[ctxn] = child_ctx;
6511 ret = inherit_group(event, parent, parent_ctx,
6521 * Initialize the perf_event context in task_struct
6523 int perf_event_init_context(struct task_struct *child, int ctxn)
6525 struct perf_event_context *child_ctx, *parent_ctx;
6526 struct perf_event_context *cloned_ctx;
6527 struct perf_event *event;
6528 struct task_struct *parent = current;
6529 int inherited_all = 1;
6530 unsigned long flags;
6533 if (likely(!parent->perf_event_ctxp[ctxn]))
6538 * If the parent's context is a clone, pin it so it won't get swapped under us.
6540 parent_ctx = perf_pin_task_context(parent, ctxn);
6543 * No need to check if parent_ctx != NULL here; since we saw
6544 * it non-NULL earlier, the only reason for it to become NULL
6545 * is if we exit, and since we're currently in the middle of
6546 * a fork we can't be exiting at the same time.
6550 * Lock the parent list. No need to lock the child - not PID
6551 * hashed yet and not running, so nobody can access it.
6553 mutex_lock(&parent_ctx->mutex);
6556 * We don't have to disable NMIs - we are only looking at
6557 * the list, not manipulating it:
6559 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
6560 ret = inherit_task_group(event, parent, parent_ctx,
6561 child, ctxn, &inherited_all);
6567 * We can't hold ctx->lock when iterating the ->flexible_group list due
6568 * to allocations, but we need to prevent rotation because
6569 * rotate_ctx() will change the list from interrupt context.
6571 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6572 parent_ctx->rotate_disable = 1;
6573 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6575 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
6576 ret = inherit_task_group(event, parent, parent_ctx,
6577 child, ctxn, &inherited_all);
6582 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6583 parent_ctx->rotate_disable = 0;
6585 child_ctx = child->perf_event_ctxp[ctxn];
6587 if (child_ctx && inherited_all) {
6589 * Mark the child context as a clone of the parent
6590 * context, or of whatever the parent is a clone of.
6592 * Note that if the parent is a clone, holding
6593 * parent_ctx->lock prevents it from being uncloned.
6595 cloned_ctx = parent_ctx->parent_ctx;
6597 child_ctx->parent_ctx = cloned_ctx;
6598 child_ctx->parent_gen = parent_ctx->parent_gen;
6600 child_ctx->parent_ctx = parent_ctx;
6601 child_ctx->parent_gen = parent_ctx->generation;
6603 get_ctx(child_ctx->parent_ctx);
6606 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6607 mutex_unlock(&parent_ctx->mutex);
6609 perf_unpin_context(parent_ctx);
6610 put_ctx(parent_ctx);
6616 * Initialize the perf_event context in task_struct
6618 int perf_event_init_task(struct task_struct *child)
6622 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
6623 mutex_init(&child->perf_event_mutex);
6624 INIT_LIST_HEAD(&child->perf_event_list);
6626 for_each_task_context_nr(ctxn) {
6627 ret = perf_event_init_context(child, ctxn);
6635 static void __init perf_event_init_all_cpus(void)
6637 struct swevent_htable *swhash;
6640 for_each_possible_cpu(cpu) {
6641 swhash = &per_cpu(swevent_htable, cpu);
6642 mutex_init(&swhash->hlist_mutex);
6643 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
6647 static void __cpuinit perf_event_init_cpu(int cpu)
6649 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6651 mutex_lock(&swhash->hlist_mutex);
6652 if (swhash->hlist_refcount > 0) {
6653 struct swevent_hlist *hlist;
6655 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
6657 rcu_assign_pointer(swhash->swevent_hlist, hlist);
6659 mutex_unlock(&swhash->hlist_mutex);
6662 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
6663 static void perf_pmu_rotate_stop(struct pmu *pmu)
6665 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6667 WARN_ON(!irqs_disabled());
6669 list_del_init(&cpuctx->rotation_list);
6672 static void __perf_event_exit_context(void *__info)
6674 struct perf_event_context *ctx = __info;
6675 struct perf_event *event, *tmp;
6677 perf_pmu_rotate_stop(ctx->pmu);
6679 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
6680 __perf_remove_from_context(event);
6681 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
6682 __perf_remove_from_context(event);
6685 static void perf_event_exit_cpu_context(int cpu)
6687 struct perf_event_context *ctx;
6691 idx = srcu_read_lock(&pmus_srcu);
6692 list_for_each_entry_rcu(pmu, &pmus, entry) {
6693 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
6695 mutex_lock(&ctx->mutex);
6696 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
6697 mutex_unlock(&ctx->mutex);
6699 srcu_read_unlock(&pmus_srcu, idx);
6702 static void perf_event_exit_cpu(int cpu)
6704 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6706 mutex_lock(&swhash->hlist_mutex);
6707 swevent_hlist_release(swhash);
6708 mutex_unlock(&swhash->hlist_mutex);
6710 perf_event_exit_cpu_context(cpu);
6713 static inline void perf_event_exit_cpu(int cpu) { }
6717 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
6721 for_each_online_cpu(cpu)
6722 perf_event_exit_cpu(cpu);
6728 * Run the perf reboot notifier at the very last possible moment so that
6729 * the generic watchdog code runs as long as possible.
6731 static struct notifier_block perf_reboot_notifier = {
6732 .notifier_call = perf_reboot,
6733 .priority = INT_MIN,
6736 static int __cpuinit
6737 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
6739 unsigned int cpu = (long)hcpu;
6741 switch (action & ~CPU_TASKS_FROZEN) {
6743 case CPU_UP_PREPARE:
6744 case CPU_DOWN_FAILED:
6745 perf_event_init_cpu(cpu);
6748 case CPU_UP_CANCELED:
6749 case CPU_DOWN_PREPARE:
6750 perf_event_exit_cpu(cpu);
6760 void __init perf_event_init(void)
6766 perf_event_init_all_cpus();
6767 init_srcu_struct(&pmus_srcu);
6768 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
6769 perf_pmu_register(&perf_cpu_clock, NULL, -1);
6770 perf_pmu_register(&perf_task_clock, NULL, -1);
6772 perf_cpu_notifier(perf_cpu_notify);
6773 register_reboot_notifier(&perf_reboot_notifier);
6775 ret = init_hw_breakpoint();
6776 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
6779 static int __init perf_event_sysfs_init(void)
6784 mutex_lock(&pmus_lock);
6786 ret = bus_register(&pmu_bus);
6790 list_for_each_entry(pmu, &pmus, entry) {
6791 if (!pmu->name || pmu->type < 0)
6794 ret = pmu_dev_alloc(pmu);
6795 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
6797 pmu_bus_running = 1;
6801 mutex_unlock(&pmus_lock);
6805 device_initcall(perf_event_sysfs_init);
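/*
 * Sysfs layout sketch (illustrative): once the bus above is registered,
 * every named pmu appears under /sys/bus/event_source/devices/<name>/, and
 * its "type" attribute (type_show() above) reports the value userspace
 * should place in perf_event_attr.type:
 *
 *	int type;
 *	FILE *f = fopen("/sys/bus/event_source/devices/tracepoint/type", "r");
 *
 *	fscanf(f, "%d", &type);
 *	fclose(f);
 *
 * For the "tracepoint" pmu this yields PERF_TYPE_TRACEPOINT; dynamically
 * registered pmus report their idr-allocated type here.
 */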
6807 #ifdef CONFIG_CGROUP_PERF
6808 static struct cgroup_subsys_state *perf_cgroup_create(
6809 struct cgroup_subsys *ss, struct cgroup *cont)
6811 struct perf_cgroup *jc;
6813 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
6815 return ERR_PTR(-ENOMEM);
6817 jc->info = alloc_percpu(struct perf_cgroup_info);
6820 return ERR_PTR(-ENOMEM);
6826 static void perf_cgroup_destroy(struct cgroup_subsys *ss,
6827 struct cgroup *cont)
6829 struct perf_cgroup *jc;
6830 jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
6831 struct perf_cgroup, css);
6832 free_percpu(jc->info);
6836 static int __perf_cgroup_move(void *info)
6838 struct task_struct *task = info;
6839 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
6844 perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
6846 task_function_call(task, __perf_cgroup_move, task);
6849 static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
6850 struct cgroup *old_cgrp, struct task_struct *task)
6853 * cgroup_exit() is called in the copy_process() failure path.
6854 * Ignore this case since the task hasn't run yet; this avoids
6855 * trying to poke a half-freed task state from generic code.
6857 if (!(task->flags & PF_EXITING))
6860 perf_cgroup_attach_task(cgrp, task);
6863 struct cgroup_subsys perf_subsys = {
6864 .name = "perf_event",
6865 .subsys_id = perf_subsys_id,
6866 .create = perf_cgroup_create,
6867 .destroy = perf_cgroup_destroy,
6868 .exit = perf_cgroup_exit,
6869 .attach_task = perf_cgroup_attach_task,
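/*
 * Usage sketch (illustrative): with the "perf_event" cgroup subsystem
 * mounted (the mount point below is only an example), per-cgroup monitoring
 * passes an open fd of the cgroup directory as the pid argument together
 * with PERF_FLAG_PID_CGROUP; a concrete cpu must be given in that mode.
 *
 *	int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
 *
 *	fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
 *		     -1, PERF_FLAG_PID_CGROUP);
 */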
6871 #endif /* CONFIG_CGROUP_PERF */