 * Performance events core code:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include <asm/irq_regs.h>
atomic_t perf_task_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;
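/*
 * For illustration (editorial sketch, not from this file): the level is
 * runtime-tunable through procfs, e.g.
 *
 *	# echo 2 > /proc/sys/kernel/perf_event_paranoid
 *
 * which, per the table above, denies kernel profiling to unprivileged
 * users while still allowing them to profile their own user-space code.
 */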
int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
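/*
 * Editorial usage sketch (hypothetical caller, not part of the original
 * file): the per-cpu disable count makes these calls nest, so only the
 * outermost pair actually touches the hardware.
 */
static inline void example_pmu_reprogram(struct pmu *example_pmu)
{
	perf_pmu_disable(example_pmu);	/* count 0 -> 1: hardware disabled */
	/* ... reconfigure counters while the PMU is quiesced ... */
	perf_pmu_enable(example_pmu);	/* count 1 -> 0: hardware re-enabled */
}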
static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);

static void get_ctx(struct perf_event_context *ctx)
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));

static void free_ctx(struct rcu_head *head)
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);

static void put_ctx(struct perf_event_context *ctx)
	if (atomic_dec_and_test(&ctx->refcount)) {
		put_ctx(ctx->parent_ctx);
		put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);

static void unclone_ctx(struct perf_event_context *ctx)
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
	/*
	 * only top level events have the pid namespace they were created in
	 */
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
	/*
	 * only top level events have the pid namespace they were created in
	 */
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);

/*
 * If we inherit events we want to return the parent event id
 */
static u64 primary_event_id(struct perf_event *event)
		id = event->parent->id;

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
	struct perf_event_context *ctx;

	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	/*
	 * If this context is a clone of another, it might
	 * get swapped for another underneath us by
	 * perf_event_task_sched_out, though the
	 * rcu_read_lock() protects us from any context
	 * getting freed. Lock the context and check if it
	 * got swapped before we could get the lock, and retry
	 * if so. If we locked the right context, then it
	 * can't get swapped on us any more.
	 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
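/*
 * Editorial sketch of the lock-and-revalidate pattern above, reduced to
 * a hypothetical RCU-managed pointer 'example_shared': lock the object,
 * then re-check that the shared pointer still refers to it; once it
 * does, holding the lock pins it and the RCU read section can end.
 */
struct example_obj {
	raw_spinlock_t lock;
};
static struct example_obj __rcu *example_shared;

static struct example_obj *example_lock_shared(unsigned long *flags)
{
	struct example_obj *obj;

retry:
	rcu_read_lock();
	obj = rcu_dereference(example_shared);
	if (obj) {
		raw_spin_lock_irqsave(&obj->lock, *flags);
		if (obj != rcu_dereference(example_shared)) {
			/* it was swapped before we got the lock: retry */
			raw_spin_unlock_irqrestore(&obj->lock, *flags);
			rcu_read_unlock();
			goto retry;
		}
	}
	rcu_read_unlock();
	return obj;	/* NULL, or locked and therefore pinned */
}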
/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
	struct perf_event_context *ctx;

	ctx = perf_lock_task_context(task, ctxn, &flags);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);

static void perf_unpin_context(struct perf_event_context *ctx)
	raw_spin_lock_irqsave(&ctx->lock, flags);
	raw_spin_unlock_irqrestore(&ctx->lock, flags);

static inline u64 perf_clock(void)
	return local_clock();

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
	struct perf_event_context *ctx = event->ctx;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;

	event->total_time_running = run_end - event->tstamp_running;
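/*
 * Editorial worked example: an event added at ctx->time 100 starts with
 * tstamp_enabled = tstamp_running = tstamp_stopped = 100.  If it is
 * scheduled in at ctx->time 120, event_sched_in() advances
 * tstamp_running by the 20 units spent stopped (100 -> 120).  Reading
 * at ctx->time 180 while it is still active then yields
 * total_time_enabled = 180 - 100 = 80 and
 * total_time_running = 180 - 120 = 60.
 */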
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
	if (event->attr.pinned)
		return &ctx->pinned_groups;

	return &ctx->flexible_groups;
/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);

	list_add_rcu(&event->event_entry, &ctx->event_list);

	perf_pmu_rotate_start(ctx->pmu);

	if (event->attr.inherit_stat)

/*
 * Called at perf_event creation and when events are attached/detached from a
 */
static void perf_event__read_size(struct perf_event *event)
	int entry = sizeof(u64); /* value */

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;

	event->read_size = size;

static void perf_event__header_size(struct perf_event *event)
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;

static void perf_event__id_header_size(struct perf_event *event)
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
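/*
 * Editorial sketch mirroring perf_event__read_size() from the reader's
 * side: the number of bytes a read() of a PERF_FORMAT_GROUP event
 * returns for a group of 'nr' events (leader plus siblings).
 */
static inline int example_group_read_size(u64 read_format, int nr)
{
	int entry = sizeof(u64);		/* one value per event */
	int size = sizeof(u64);			/* the leading 'nr' word */

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);		/* each value carries an id */

	return size + nr * entry;
}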
static void perf_group_attach(struct perf_event *event)
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (event->attr.inherit_stat)

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

static void perf_group_detach(struct perf_event *event)
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;

	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);

event_filter_match(struct perf_event *event)
	return event->cpu == -1 || event->cpu == smp_processor_id();

event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = ctx->time - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = ctx->time;

	if (event->state != PERF_EVENT_STATE_ACTIVE)

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;

	event->tstamp_stopped = ctx->time;
	event->pmu->del(event, 0);

	if (!is_software_event(event))
		cpuctx->active_oncpu--;

	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)

	raw_spin_lock(&ctx->lock);

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	raw_spin_unlock(&ctx->lock);

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,

	task_oncpu_function_call(task, __perf_event_remove_from_context,

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can remove the event safely, if the call above did not
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);

/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;

	raw_spin_unlock(&ctx->lock);

/*
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,

	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;

	raw_spin_unlock_irq(&ctx->lock);

event_sched_in(struct perf_event *event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
	if (event->state <= PERF_EVENT_STATE_OFF)

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;

	event->tstamp_running += ctx->time - event->tstamp_stopped;

	event->shadow_ctx_time = ctx->time - ctx->timestamp;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;

	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;

	if (!pmu->commit_txn(pmu))

	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone thru event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)

			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;

			event_sched_out(event, cpuctx, ctx);

	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);
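/*
 * Editorial sketch of the transaction protocol driven above ('p', 'e1'
 * and 'e2' are hypothetical; the pmu->start_txn() call that opens the
 * transaction precedes the add loop shown here):
 *
 *	p->start_txn(p);
 *	if (p->add(e1, PERF_EF_START) || p->add(e2, PERF_EF_START) ||
 *	    p->commit_txn(p))
 *		p->cancel_txn(p);		// roll back, nothing counts
 *
 * Only a commit_txn() that returns 0 lets the group start counting, so
 * the group is scheduled onto the PMU atomically or not at all.
 */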
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
	/*
	 * If an exclusive group is already on, no other hardware
	 */
	if (cpuctx->exclusive)
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
	/*
	 * Otherwise, try to add it if all previous groups were able
	 */

static void add_event_to_ctx(struct perf_event *event,
			     struct perf_event_context *ctx)
	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = ctx->time;
	event->tstamp_running = ctx->time;
	event->tstamp_stopped = ctx->time;

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no events.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
		cpuctx->task_ctx = ctx;

	raw_spin_lock(&ctx->lock);

	update_context_time(ctx);

	add_event_to_ctx(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))

	err = event_sched_in(event, cpuctx, ctx);

		/*
		 * This event couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;

	raw_spin_unlock(&ctx->lock);

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
	struct task_struct *task = ctx->task;

		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,

	task_oncpu_function_call(task, __perf_install_in_context,

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can add the event safely, if the call above did not
	 */
	if (list_empty(&event->group_entry))
		add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
	struct perf_event *sub;

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = ctx->time - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled =
				ctx->time - sub->total_time_enabled;

/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(void *info)
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
		cpuctx->task_ctx = ctx;

	raw_spin_lock(&ctx->lock);

	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
	__perf_event_mark_enabled(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)

	if (!group_can_go_on(event, cpuctx, 1)) {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
			err = event_sched_in(event, cpuctx, ctx);

		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;

	raw_spin_unlock(&ctx->lock);

/*
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

	raw_spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

	raw_spin_unlock_irq(&ctx->lock);

static int perf_event_refresh(struct perf_event *event, int refresh)
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit || !is_sampling_event(event))

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	EVENT_FLEXIBLE = 0x1,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,

static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);

	if (likely(!ctx->nr_events))

	update_context_time(ctx);

	if (!ctx->nr_active)

	if (event_type & EVENT_PINNED) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);

	if (event_type & EVENT_FLEXIBLE) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);

	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
	if (!event->attr.inherit_stat)

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
	struct perf_event *event, *next_event;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);

void perf_event_context_sched_out(struct task_struct *task, int ctxn,
				  struct task_struct *next)
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)

	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch. We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime). It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;

			next_ctx->task = task;

			perf_event_sync_stat(ctx, next_ctx);

		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);

	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
	cpuctx->task_ctx = NULL;

#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);

static void task_ctx_sched_out(struct perf_event_context *ctx,
			       enum event_type_t event_type)
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))

	ctx_sched_out(ctx, cpuctx, event_type);
	cpuctx->task_ctx = NULL;

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);

ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
		if (event->cpu != -1 && event->cpu != smp_processor_id())

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;

ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
	struct perf_event *event;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 */
		if (event->cpu != -1 && event->cpu != smp_processor_id())

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))

ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type)
	raw_spin_lock(&ctx->lock);

	if (likely(!ctx->nr_events))

	ctx->timestamp = perf_clock();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (event_type & EVENT_PINNED)
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (event_type & EVENT_FLEXIBLE)
		ctx_flexible_sched_in(ctx, cpuctx);

	raw_spin_unlock(&ctx->lock);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type)
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type);

static void task_ctx_sched_in(struct perf_event_context *ctx,
			      enum event_type_t event_type)
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)

	ctx_sched_in(ctx, cpuctx, event_type);
	cpuctx->task_ctx = ctx;

void perf_event_context_sched_in(struct perf_event_context *ctx)
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)

	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);

	cpuctx->task_ctx = ctx;

	/*
	 * Since these rotations are per-cpu, we need to ensure the
	 * cpu-context we got scheduled on is actually rotating.
	 */
	perf_pmu_rotate_start(ctx->pmu);
	perf_pmu_enable(ctx->pmu);

/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *task)
	struct perf_event_context *ctx;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];

		perf_event_context_sched_in(ctx);

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 */
	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
	if (a##_fls > b##_fls) {	\

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);

		dividend = count * sec;

		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);

		divisor = nsec * frequency;

	return div64_u64(dividend, divisor);
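/*
 * Editorial cross-check (not part of the original file): with 128-bit
 * arithmetic the whole function reduces to the comment's formula; the
 * REDUCE_FLS() dance above only approximates this without overflowing
 * 64 bits.  Assumes a compiler providing __int128.
 */
static inline u64 example_calculate_period(u64 count, u64 nsec, u64 freq)
{
	unsigned __int128 dividend = (unsigned __int128)count * NSEC_PER_SEC;
	unsigned __int128 divisor = (unsigned __int128)nsec * freq;

	return divisor ? (u64)(dividend / divisor) : 0;
}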
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		event->pmu->stop(event, PERF_EF_UPDATE);
		local64_set(&hwc->period_left, 0);
		event->pmu->start(event, PERF_EF_RELOAD);
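/*
 * Editorial worked example of the low-pass filter above: with
 * hwc->sample_period == 10000 and an ideal period of 18000, delta is
 * (18000 - 10000 + 7) / 8 = 1000, so the period only moves to 11000 on
 * this tick and converges geometrically over subsequent ticks instead
 * of oscillating with every burst of events.
 */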
static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)

		if (event->cpu != -1 && event->cpu != smp_processor_id())

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);

		if (!event->attr.freq || !event->attr.sample_freq)

		event->pmu->read(event);
		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

			perf_adjust_period(event, period, delta);
	raw_spin_unlock(&ctx->lock);

/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
	raw_spin_lock(&ctx->lock);

	/*
	 * Rotate the first entry last of non-pinned groups. Rotation might be
	 * disabled by the inheritance code.
	 */
	if (!ctx->rotate_disable)
		list_rotate_left(&ctx->flexible_groups);

	raw_spin_unlock(&ctx->lock);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
	struct perf_event_context *ctx = NULL;
	int rotate = 0, remove = 1;

	if (cpuctx->ctx.nr_events) {
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)

	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		if (ctx->nr_events != ctx->nr_active)

	perf_pmu_disable(cpuctx->ctx.pmu);
	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
		perf_ctx_adjust_freq(ctx, interval);

	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);

	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);

		list_del_init(&cpuctx->rotation_list);

	perf_pmu_enable(cpuctx->ctx.pmu);

void perf_event_task_tick(void)
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;

	WARN_ON(!irqs_disabled());

	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		if (cpuctx->jiffies_interval == 1 ||
		    !(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);

static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
	if (!event->attr.enable_on_exec)

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)

	__perf_event_mark_enabled(event, ctx);

/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
	struct perf_event *event;
	unsigned long flags;

	local_irq_save(flags);
	if (!ctx || !ctx->nr_events)

	task_ctx_sched_out(ctx, EVENT_ALL);

	raw_spin_lock(&ctx->lock);

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);

	/*
	 * Unclone this context if we enabled any event.
	 */
	raw_spin_unlock(&ctx->lock);

	perf_event_context_sched_in(ctx);

	local_irq_restore(flags);

/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived. In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);
	update_event_times(event);
	raw_spin_unlock(&ctx->lock);

	event->pmu->read(event);

static inline u64 perf_event_count(struct perf_event *event)
	return local64_read(&event->count) + atomic64_read(&event->child_count);

static u64 perf_event_read(struct perf_event *event)
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		/*
		 * may read while context is not active
		 * (e.g., thread is blocked), in that case
		 * we cannot update context time
		 */
			update_context_time(ctx);
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);

	return perf_event_count(event);

struct callchain_cpus_entries {
	struct rcu_head rcu_head;
	struct perf_callchain_entry *cpu_entries[0];

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
struct callchain_cpus_entries *callchain_cpus_entries;

__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)

static void release_callchain_buffers_rcu(struct rcu_head *head)
	struct callchain_cpus_entries *entries;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

static void release_callchain_buffers(void)
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	rcu_assign_pointer(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);

static int alloc_callchain_buffers(void)
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
		num_possible_cpus();

	entries = kzalloc(size, GFP_KERNEL);

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
		if (!entries->cpu_entries[cpu])

	rcu_assign_pointer(callchain_cpus_entries, entries);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

static int get_callchain_buffers(void)
	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {

		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)

	err = alloc_callchain_buffers();
		release_callchain_buffers();

	mutex_unlock(&callchain_mutex);

static void put_callchain_buffers(void)
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);

static int get_recursion_context(int *recursion)
	else if (in_softirq())

	if (recursion[rctx])

static inline void put_recursion_context(int *recursion, int rctx)
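/*
 * Editorial sketch of the slot mapping used by get_recursion_context()
 * above: one recursion counter per execution context, so e.g. an NMI
 * arriving during a task-context callchain capture still finds a free
 * buffer, while true same-level recursion is refused.
 */
static inline int example_recursion_slot(void)
{
	if (in_nmi())
		return 3;
	if (in_irq())
		return 2;
	if (in_softirq())
		return 1;
	return 0;			/* plain task context */
}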
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));

	entries = rcu_dereference(callchain_cpus_entries);

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];

put_callchain_entry(int rctx)
	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);

static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
	struct perf_callchain_entry *entry;

	entry = get_callchain_entry(&rctx);

	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
			regs = task_pt_regs(current);

		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);

	put_callchain_entry(rctx);

/*
 * Initialize the perf_event context in a task_struct:
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);

static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);

	__perf_event_init_context(ctx);
		get_task_struct(task);

static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
	struct task_struct *task;

		task = find_task_by_vpid(vpid);
		get_task_struct(task);

		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach events to a dying task.
	 */
	if (task->flags & PF_EXITING)

	/* Reuse ptrace permission checks for now. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ))

	put_task_struct(task);
	return ERR_PTR(err);

static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	unsigned long flags;

	if (!task && cpu != -1) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu >= nr_cpumask_bits)
			return ERR_PTR(-EINVAL);
		/*
		 * We could be clever and allow attaching an event to an
		 * offline CPU and activate it when the CPU comes up, but
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

	ctxn = pmu->task_ctx_nr;

	ctx = perf_lock_task_context(task, ctxn, &flags);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);

		ctx = alloc_perf_context(pmu, task);

		if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			put_task_struct(task);

	return ERR_PTR(err);

static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
		put_pid_ns(event->ns);
	perf_event_free_filter(event);

static void perf_buffer_put(struct perf_buffer *buffer);

static void free_event(struct perf_event *event)
	irq_work_sync(&event->pending);

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_dec(&perf_task_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();

	if (event->buffer) {
		perf_buffer_put(event->buffer);
		event->buffer = NULL;

		event->destroy(event);

		put_ctx(event->ctx);

	call_rcu(&event->rcu_head, free_event_rcu);

int perf_event_release_kernel(struct perf_event *event)
	struct perf_event_context *ctx = event->ctx;

	/*
	 * Remove from the PMU, can't get re-enabled since we got
	 * here because the last ref went.
	 */
	perf_event_disable(event);

	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);

EXPORT_SYMBOL_GPL(perf_event_release_kernel);

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
	struct perf_event *event = file->private_data;
	struct task_struct *owner;

	file->private_data = NULL;

	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);

		mutex_lock(&owner->perf_event_mutex);
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 */
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);

	return perf_event_release_kernel(event);

u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
	struct perf_event *child;

	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	mutex_unlock(&event->child_mutex);

EXPORT_SYMBOL_GPL(perf_event_read_value);

static int perf_event_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;

	u64 count, enabled, running;

	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {

	mutex_unlock(&ctx->mutex);

static int perf_event_read_one(struct perf_event *event,
				 u64 read_format, char __user *buf)
	u64 enabled, running;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))

	return n * sizeof(u64);

/*
 * Read the performance event - simple non blocking version for now
 */
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
	u64 read_format = event->attr.read_format;
	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)

	if (count < event->read_size)

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
		ret = perf_event_read_one(event, read_format, buf);

perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
	struct perf_event *event = file->private_data;

	return perf_read_hw(event, buf, count);
2534 static unsigned int perf_poll(struct file *file, poll_table *wait)
2536 struct perf_event *event = file->private_data;
2537 struct perf_buffer *buffer;
2538 unsigned int events = POLL_HUP;
2541 buffer = rcu_dereference(event->buffer);
2543 events = atomic_xchg(&buffer->poll, 0);
2546 poll_wait(file, &event->waitq, wait);
2551 static void perf_event_reset(struct perf_event *event)
2553 (void)perf_event_read(event);
2554 local64_set(&event->count, 0);
2555 perf_event_update_userpage(event);
2559 * Holding the top-level event's child_mutex means that any
2560 * descendant process that has inherited this event will block
2561 * in sync_child_event if it goes to exit, thus satisfying the
2562 * task existence requirements of perf_event_enable/disable.
2564 static void perf_event_for_each_child(struct perf_event *event,
2565 void (*func)(struct perf_event *))
2567 struct perf_event *child;
2569 WARN_ON_ONCE(event->ctx->parent_ctx);
2570 mutex_lock(&event->child_mutex);
2572 list_for_each_entry(child, &event->child_list, child_list)
2574 mutex_unlock(&event->child_mutex);
2577 static void perf_event_for_each(struct perf_event *event,
2578 void (*func)(struct perf_event *))
2580 struct perf_event_context *ctx = event->ctx;
2581 struct perf_event *sibling;
2583 WARN_ON_ONCE(ctx->parent_ctx);
2584 mutex_lock(&ctx->mutex);
2585 event = event->group_leader;
2587 perf_event_for_each_child(event, func);
2589 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2590 perf_event_for_each_child(event, func);
2591 mutex_unlock(&ctx->mutex);
2594 static int perf_event_period(struct perf_event *event, u64 __user *arg)
2596 struct perf_event_context *ctx = event->ctx;
2600 if (!is_sampling_event(event))
2603 if (copy_from_user(&value, arg, sizeof(value)))
2609 raw_spin_lock_irq(&ctx->lock);
2610 if (event->attr.freq) {
2611 if (value > sysctl_perf_event_sample_rate) {
2616 event->attr.sample_freq = value;
2618 event->attr.sample_period = value;
2619 event->hw.sample_period = value;
2622 raw_spin_unlock_irq(&ctx->lock);
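/*
 * Illustrative user-space counterpart of the handler above (a sketch,
 * assuming fd came from perf_event_open()).  For attr.freq events the
 * same call updates sample_freq, bounded by
 * sysctl_perf_event_sample_rate; otherwise it replaces sample_period.
 */
#if 0
static int set_period(int fd, __u64 period)
{
	return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}
#endif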
2627 static const struct file_operations perf_fops;
2629 static struct perf_event *perf_fget_light(int fd, int *fput_needed)
2633 file = fget_light(fd, fput_needed);
2635 return ERR_PTR(-EBADF);
2637 if (file->f_op != &perf_fops) {
2638 fput_light(file, *fput_needed);
2640 return ERR_PTR(-EBADF);
2643 return file->private_data;
2646 static int perf_event_set_output(struct perf_event *event,
2647 struct perf_event *output_event);
2648 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2650 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2652 struct perf_event *event = file->private_data;
2653 void (*func)(struct perf_event *);
2657 case PERF_EVENT_IOC_ENABLE:
2658 func = perf_event_enable;
2660 case PERF_EVENT_IOC_DISABLE:
2661 func = perf_event_disable;
2663 case PERF_EVENT_IOC_RESET:
2664 func = perf_event_reset;
2667 case PERF_EVENT_IOC_REFRESH:
2668 return perf_event_refresh(event, arg);
2670 case PERF_EVENT_IOC_PERIOD:
2671 return perf_event_period(event, (u64 __user *)arg);
2673 case PERF_EVENT_IOC_SET_OUTPUT:
2675 struct perf_event *output_event = NULL;
2676 int fput_needed = 0;
2680 output_event = perf_fget_light(arg, &fput_needed);
2681 if (IS_ERR(output_event))
2682 return PTR_ERR(output_event);
2685 ret = perf_event_set_output(event, output_event);
2687 fput_light(output_event->filp, fput_needed);
2692 case PERF_EVENT_IOC_SET_FILTER:
2693 return perf_event_set_filter(event, (void __user *)arg);
2699 if (flags & PERF_IOC_FLAG_GROUP)
2700 perf_event_for_each(event, func);
2702 perf_event_for_each_child(event, func);
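/*
 * Illustrative user-space sketch of the PERF_IOC_FLAG_GROUP path
 * above: enable/disable applied to the group leader fans out to every
 * sibling via perf_event_for_each().  Assumes leader_fd is the group
 * leader's file descriptor.
 */
#if 0
static void toggle_group(int leader_fd, int on)
{
	ioctl(leader_fd, on ? PERF_EVENT_IOC_ENABLE
			    : PERF_EVENT_IOC_DISABLE,
	      PERF_IOC_FLAG_GROUP);
}
#endif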
2707 int perf_event_task_enable(void)
2709 struct perf_event *event;
2711 mutex_lock(&current->perf_event_mutex);
2712 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2713 perf_event_for_each_child(event, perf_event_enable);
2714 mutex_unlock(&current->perf_event_mutex);
2719 int perf_event_task_disable(void)
2721 struct perf_event *event;
2723 mutex_lock(&current->perf_event_mutex);
2724 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2725 perf_event_for_each_child(event, perf_event_disable);
2726 mutex_unlock(&current->perf_event_mutex);
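/*
 * The two helpers above back the prctl() interface; a task can bracket
 * an uncounted section of its own code like this (illustrative):
 */
#if 0
prctl(PR_TASK_PERF_EVENTS_DISABLE);
/* ... work that should not be counted ... */
prctl(PR_TASK_PERF_EVENTS_ENABLE);
#endif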
2731 #ifndef PERF_EVENT_INDEX_OFFSET
2732 # define PERF_EVENT_INDEX_OFFSET 0
2733 #endif
2735 static int perf_event_index(struct perf_event *event)
2737 if (event->hw.state & PERF_HES_STOPPED)
2740 if (event->state != PERF_EVENT_STATE_ACTIVE)
2743 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2747 * Callers need to ensure there can be no nesting of this function, otherwise
2748 * the seqlock logic goes bad. We cannot serialize this because the arch
2749 * code calls this from NMI context.
2751 void perf_event_update_userpage(struct perf_event *event)
2753 struct perf_event_mmap_page *userpg;
2754 struct perf_buffer *buffer;
2757 buffer = rcu_dereference(event->buffer);
2761 userpg = buffer->user_page;
2764 * Disable preemption so as to not let the corresponding user-space
2765 * spin too long if we get preempted.
2770 userpg->index = perf_event_index(event);
2771 userpg->offset = perf_event_count(event);
2772 if (event->state == PERF_EVENT_STATE_ACTIVE)
2773 userpg->offset -= local64_read(&event->hw.prev_count);
2775 userpg->time_enabled = event->total_time_enabled +
2776 atomic64_read(&event->child_total_time_enabled);
2778 userpg->time_running = event->total_time_running +
2779 atomic64_read(&event->child_total_time_running);
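/*
 * Illustrative user-space sketch: reading the self-monitoring fields
 * published above.  A seqlock-style retry loop, assuming the kernel
 * increments userpg->lock before and after each update (odd value =
 * update in progress), per the nesting rule described above.
 */
#if 0
static void read_times(volatile struct perf_event_mmap_page *pc,
		       __u64 *enabled, __u64 *running)
{
	__u32 seq;

	do {
		seq = pc->lock;
		__sync_synchronize();	/* read seq before the data */
		*enabled = pc->time_enabled;
		*running = pc->time_running;
		__sync_synchronize();	/* read the data before re-check */
	} while (pc->lock != seq || (seq & 1));
}
#endif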
2788 static unsigned long perf_data_size(struct perf_buffer *buffer);
2791 perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
2793 long max_size = perf_data_size(buffer);
2796 buffer->watermark = min(max_size, watermark);
2798 if (!buffer->watermark)
2799 buffer->watermark = max_size / 2;
2801 if (flags & PERF_BUFFER_WRITABLE)
2802 buffer->writable = 1;
2804 atomic_set(&buffer->refcount, 1);
2807 #ifndef CONFIG_PERF_USE_VMALLOC
2810 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2813 static struct page *
2814 perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
2816 if (pgoff > buffer->nr_pages)
2820 return virt_to_page(buffer->user_page);
2822 return virt_to_page(buffer->data_pages[pgoff - 1]);
2825 static void *perf_mmap_alloc_page(int cpu)
2830 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2831 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2835 return page_address(page);
2838 static struct perf_buffer *
2839 perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
2841 struct perf_buffer *buffer;
2845 size = sizeof(struct perf_buffer);
2846 size += nr_pages * sizeof(void *);
2848 buffer = kzalloc(size, GFP_KERNEL);
2852 buffer->user_page = perf_mmap_alloc_page(cpu);
2853 if (!buffer->user_page)
2854 goto fail_user_page;
2856 for (i = 0; i < nr_pages; i++) {
2857 buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
2858 if (!buffer->data_pages[i])
2859 goto fail_data_pages;
2862 buffer->nr_pages = nr_pages;
2864 perf_buffer_init(buffer, watermark, flags);
2869 for (i--; i >= 0; i--)
2870 free_page((unsigned long)buffer->data_pages[i]);
2872 free_page((unsigned long)buffer->user_page);
2881 static void perf_mmap_free_page(unsigned long addr)
2883 struct page *page = virt_to_page((void *)addr);
2885 page->mapping = NULL;
2889 static void perf_buffer_free(struct perf_buffer *buffer)
2893 perf_mmap_free_page((unsigned long)buffer->user_page);
2894 for (i = 0; i < buffer->nr_pages; i++)
2895 perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
2899 static inline int page_order(struct perf_buffer *buffer)
2901 return 0;
2904 #else
2907 * Back perf_mmap() with vmalloc memory.
2909 * Required for architectures that have d-cache aliasing issues.
2912 static inline int page_order(struct perf_buffer *buffer)
2914 return buffer->page_order;
2917 static struct page *
2918 perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
2920 if (pgoff > (1UL << page_order(buffer)))
2923 return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
2926 static void perf_mmap_unmark_page(void *addr)
2928 struct page *page = vmalloc_to_page(addr);
2930 page->mapping = NULL;
2933 static void perf_buffer_free_work(struct work_struct *work)
2935 struct perf_buffer *buffer;
2939 buffer = container_of(work, struct perf_buffer, work);
2940 nr = 1 << page_order(buffer);
2942 base = buffer->user_page;
2943 for (i = 0; i < nr + 1; i++)
2944 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2950 static void perf_buffer_free(struct perf_buffer *buffer)
2952 schedule_work(&buffer->work);
2955 static struct perf_buffer *
2956 perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
2958 struct perf_buffer *buffer;
2962 size = sizeof(struct perf_buffer);
2963 size += sizeof(void *);
2965 buffer = kzalloc(size, GFP_KERNEL);
2969 INIT_WORK(&buffer->work, perf_buffer_free_work);
2971 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2975 buffer->user_page = all_buf;
2976 buffer->data_pages[0] = all_buf + PAGE_SIZE;
2977 buffer->page_order = ilog2(nr_pages);
2978 buffer->nr_pages = 1;
2980 perf_buffer_init(buffer, watermark, flags);
2993 static unsigned long perf_data_size(struct perf_buffer *buffer)
2995 return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
2998 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3000 struct perf_event *event = vma->vm_file->private_data;
3001 struct perf_buffer *buffer;
3002 int ret = VM_FAULT_SIGBUS;
3004 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3005 if (vmf->pgoff == 0)
3011 buffer = rcu_dereference(event->buffer);
3015 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3018 vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
3022 get_page(vmf->page);
3023 vmf->page->mapping = vma->vm_file->f_mapping;
3024 vmf->page->index = vmf->pgoff;
3033 static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
3035 struct perf_buffer *buffer;
3037 buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
3038 perf_buffer_free(buffer);
3041 static struct perf_buffer *perf_buffer_get(struct perf_event *event)
3043 struct perf_buffer *buffer;
3046 buffer = rcu_dereference(event->buffer);
3048 if (!atomic_inc_not_zero(&buffer->refcount))
3056 static void perf_buffer_put(struct perf_buffer *buffer)
3058 if (!atomic_dec_and_test(&buffer->refcount))
3061 call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
3064 static void perf_mmap_open(struct vm_area_struct *vma)
3066 struct perf_event *event = vma->vm_file->private_data;
3068 atomic_inc(&event->mmap_count);
3071 static void perf_mmap_close(struct vm_area_struct *vma)
3073 struct perf_event *event = vma->vm_file->private_data;
3075 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
3076 unsigned long size = perf_data_size(event->buffer);
3077 struct user_struct *user = event->mmap_user;
3078 struct perf_buffer *buffer = event->buffer;
3080 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
3081 vma->vm_mm->locked_vm -= event->mmap_locked;
3082 rcu_assign_pointer(event->buffer, NULL);
3083 mutex_unlock(&event->mmap_mutex);
3085 perf_buffer_put(buffer);
3090 static const struct vm_operations_struct perf_mmap_vmops = {
3091 .open = perf_mmap_open,
3092 .close = perf_mmap_close,
3093 .fault = perf_mmap_fault,
3094 .page_mkwrite = perf_mmap_fault,
3097 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3099 struct perf_event *event = file->private_data;
3100 unsigned long user_locked, user_lock_limit;
3101 struct user_struct *user = current_user();
3102 unsigned long locked, lock_limit;
3103 struct perf_buffer *buffer;
3104 unsigned long vma_size;
3105 unsigned long nr_pages;
3106 long user_extra, extra;
3107 int ret = 0, flags = 0;
3110 * Don't allow mmap() of inherited per-task counters. This would
3111 * create a performance issue due to all children writing to the same buffer.
3114 if (event->cpu == -1 && event->attr.inherit)
3117 if (!(vma->vm_flags & VM_SHARED))
3120 vma_size = vma->vm_end - vma->vm_start;
3121 nr_pages = (vma_size / PAGE_SIZE) - 1;
3124 * If we have buffer pages ensure they're a power-of-two number, so we
3125 * can do bitmasks instead of modulo.
3127 if (nr_pages != 0 && !is_power_of_2(nr_pages))
3130 if (vma_size != PAGE_SIZE * (1 + nr_pages))
3133 if (vma->vm_pgoff != 0)
3136 WARN_ON_ONCE(event->ctx->parent_ctx);
3137 mutex_lock(&event->mmap_mutex);
3138 if (event->buffer) {
3139 if (event->buffer->nr_pages == nr_pages)
3140 atomic_inc(&event->buffer->refcount);
3146 user_extra = nr_pages + 1;
3147 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3150 * Increase the limit linearly with more CPUs:
3152 user_lock_limit *= num_online_cpus();
3154 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3157 if (user_locked > user_lock_limit)
3158 extra = user_locked - user_lock_limit;
3160 lock_limit = rlimit(RLIMIT_MEMLOCK);
3161 lock_limit >>= PAGE_SHIFT;
3162 locked = vma->vm_mm->locked_vm + extra;
3164 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3165 !capable(CAP_IPC_LOCK)) {
3170 WARN_ON(event->buffer);
3172 if (vma->vm_flags & VM_WRITE)
3173 flags |= PERF_BUFFER_WRITABLE;
3175 buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
3181 rcu_assign_pointer(event->buffer, buffer);
3183 atomic_long_add(user_extra, &user->locked_vm);
3184 event->mmap_locked = extra;
3185 event->mmap_user = get_current_user();
3186 vma->vm_mm->locked_vm += event->mmap_locked;
3190 atomic_inc(&event->mmap_count);
3191 mutex_unlock(&event->mmap_mutex);
3193 vma->vm_flags |= VM_RESERVED;
3194 vma->vm_ops = &perf_mmap_vmops;
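/*
 * Illustrative user-space sketch of a mapping that satisfies the
 * checks above: one control page plus a power-of-two number of data
 * pages, MAP_SHARED, at offset 0.  Assumes fd from perf_event_open().
 */
#if 0
static void *map_buffer(int fd, unsigned long nr_data_pages)
{
	size_t len = (nr_data_pages + 1) * sysconf(_SC_PAGESIZE);

	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
#endif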
3199 static int perf_fasync(int fd, struct file *filp, int on)
3201 struct inode *inode = filp->f_path.dentry->d_inode;
3202 struct perf_event *event = filp->private_data;
3205 mutex_lock(&inode->i_mutex);
3206 retval = fasync_helper(fd, filp, on, &event->fasync);
3207 mutex_unlock(&inode->i_mutex);
3215 static const struct file_operations perf_fops = {
3216 .llseek = no_llseek,
3217 .release = perf_release,
3220 .unlocked_ioctl = perf_ioctl,
3221 .compat_ioctl = perf_ioctl,
3223 .fasync = perf_fasync,
3229 * If there's data, ensure we set the poll() state and publish everything
3230 * to user-space before waking everybody up.
3233 void perf_event_wakeup(struct perf_event *event)
3235 wake_up_all(&event->waitq);
3237 if (event->pending_kill) {
3238 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3239 event->pending_kill = 0;
3243 static void perf_pending_event(struct irq_work *entry)
3245 struct perf_event *event = container_of(entry,
3246 struct perf_event, pending);
3248 if (event->pending_disable) {
3249 event->pending_disable = 0;
3250 __perf_event_disable(event);
3253 if (event->pending_wakeup) {
3254 event->pending_wakeup = 0;
3255 perf_event_wakeup(event);
3260 * We assume there is only KVM supporting the callbacks.
3261 * Later on, we might change it to a list if there is
3262 * another virtualization implementation supporting the callbacks.
3264 struct perf_guest_info_callbacks *perf_guest_cbs;
3266 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3268 perf_guest_cbs = cbs;
3271 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3273 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3275 perf_guest_cbs = NULL;
3278 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3283 static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
3284 unsigned long offset, unsigned long head)
3288 if (!buffer->writable)
3291 mask = perf_data_size(buffer) - 1;
3293 offset = (offset - tail) & mask;
3294 head = (head - tail) & mask;
3296 if ((int)(head - offset) < 0)
3302 static void perf_output_wakeup(struct perf_output_handle *handle)
3304 atomic_set(&handle->buffer->poll, POLL_IN);
3307 handle->event->pending_wakeup = 1;
3308 irq_work_queue(&handle->event->pending);
3310 perf_event_wakeup(handle->event);
3314 * We need to ensure a later event_id doesn't publish a head when a former
3315 * event isn't done writing. However, since we need to deal with NMIs we
3316 * cannot fully serialize things.
3318 * We only publish the head (and generate a wakeup) when the outer-most event completes.
3321 static void perf_output_get_handle(struct perf_output_handle *handle)
3323 struct perf_buffer *buffer = handle->buffer;
3326 local_inc(&buffer->nest);
3327 handle->wakeup = local_read(&buffer->wakeup);
3330 static void perf_output_put_handle(struct perf_output_handle *handle)
3332 struct perf_buffer *buffer = handle->buffer;
3336 head = local_read(&buffer->head);
3339 * IRQ/NMI can happen here, which means we can miss a head update.
3342 if (!local_dec_and_test(&buffer->nest))
3346 * Publish the known good head. Rely on the full barrier implied
3347 * by atomic_dec_and_test() to order the buffer->head read and this write.
3350 buffer->user_page->data_head = head;
3353 * Now check if we missed an update, rely on the (compiler)
3354 * barrier in atomic_dec_and_test() to re-read buffer->head.
3356 if (unlikely(head != local_read(&buffer->head))) {
3357 local_inc(&buffer->nest);
3361 if (handle->wakeup != local_read(&buffer->wakeup))
3362 perf_output_wakeup(handle);
3368 __always_inline void perf_output_copy(struct perf_output_handle *handle,
3369 const void *buf, unsigned int len)
3372 unsigned long size = min_t(unsigned long, handle->size, len);
3374 memcpy(handle->addr, buf, size);
3377 handle->addr += size;
3379 handle->size -= size;
3380 if (!handle->size) {
3381 struct perf_buffer *buffer = handle->buffer;
3384 handle->page &= buffer->nr_pages - 1;
3385 handle->addr = buffer->data_pages[handle->page];
3386 handle->size = PAGE_SIZE << page_order(buffer);
3391 static void __perf_event_header__init_id(struct perf_event_header *header,
3392 struct perf_sample_data *data,
3393 struct perf_event *event)
3395 u64 sample_type = event->attr.sample_type;
3397 data->type = sample_type;
3398 header->size += event->id_header_size;
3400 if (sample_type & PERF_SAMPLE_TID) {
3401 /* namespace issues */
3402 data->tid_entry.pid = perf_event_pid(event, current);
3403 data->tid_entry.tid = perf_event_tid(event, current);
3406 if (sample_type & PERF_SAMPLE_TIME)
3407 data->time = perf_clock();
3409 if (sample_type & PERF_SAMPLE_ID)
3410 data->id = primary_event_id(event);
3412 if (sample_type & PERF_SAMPLE_STREAM_ID)
3413 data->stream_id = event->id;
3415 if (sample_type & PERF_SAMPLE_CPU) {
3416 data->cpu_entry.cpu = raw_smp_processor_id();
3417 data->cpu_entry.reserved = 0;
3421 static void perf_event_header__init_id(struct perf_event_header *header,
3422 struct perf_sample_data *data,
3423 struct perf_event *event)
3425 if (event->attr.sample_id_all)
3426 __perf_event_header__init_id(header, data, event);
3429 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3430 struct perf_sample_data *data)
3432 u64 sample_type = data->type;
3434 if (sample_type & PERF_SAMPLE_TID)
3435 perf_output_put(handle, data->tid_entry);
3437 if (sample_type & PERF_SAMPLE_TIME)
3438 perf_output_put(handle, data->time);
3440 if (sample_type & PERF_SAMPLE_ID)
3441 perf_output_put(handle, data->id);
3443 if (sample_type & PERF_SAMPLE_STREAM_ID)
3444 perf_output_put(handle, data->stream_id);
3446 if (sample_type & PERF_SAMPLE_CPU)
3447 perf_output_put(handle, data->cpu_entry);
3450 static void perf_event__output_id_sample(struct perf_event *event,
3451 struct perf_output_handle *handle,
3452 struct perf_sample_data *sample)
3454 if (event->attr.sample_id_all)
3455 __perf_event__output_id_sample(handle, sample);
3458 int perf_output_begin(struct perf_output_handle *handle,
3459 struct perf_event *event, unsigned int size,
3460 int nmi, int sample)
3462 struct perf_buffer *buffer;
3463 unsigned long tail, offset, head;
3465 struct perf_sample_data sample_data;
3467 struct perf_event_header header;
3474 * For inherited events we send all the output towards the parent.
3477 event = event->parent;
3479 buffer = rcu_dereference(event->buffer);
3483 handle->buffer = buffer;
3484 handle->event = event;
3486 handle->sample = sample;
3488 if (!buffer->nr_pages)
3491 have_lost = local_read(&buffer->lost);
3493 lost_event.header.size = sizeof(lost_event);
3494 perf_event_header__init_id(&lost_event.header, &sample_data,
3496 size += lost_event.header.size;
3499 perf_output_get_handle(handle);
3503 * Userspace could choose to issue an mb() before updating the
3504 * tail pointer, so that all reads will be completed before the write is issued.
3507 tail = ACCESS_ONCE(buffer->user_page->data_tail);
3509 offset = head = local_read(&buffer->head);
3511 if (unlikely(!perf_output_space(buffer, tail, offset, head)))
3513 } while (local_cmpxchg(&buffer->head, offset, head) != offset);
3515 if (head - local_read(&buffer->wakeup) > buffer->watermark)
3516 local_add(buffer->watermark, &buffer->wakeup);
3518 handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
3519 handle->page &= buffer->nr_pages - 1;
3520 handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
3521 handle->addr = buffer->data_pages[handle->page];
3522 handle->addr += handle->size;
3523 handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
3526 lost_event.header.type = PERF_RECORD_LOST;
3527 lost_event.header.misc = 0;
3528 lost_event.id = event->id;
3529 lost_event.lost = local_xchg(&buffer->lost, 0);
3531 perf_output_put(handle, lost_event);
3532 perf_event__output_id_sample(event, handle, &sample_data);
3538 local_inc(&buffer->lost);
3539 perf_output_put_handle(handle);
3546 void perf_output_end(struct perf_output_handle *handle)
3548 struct perf_event *event = handle->event;
3549 struct perf_buffer *buffer = handle->buffer;
3551 int wakeup_events = event->attr.wakeup_events;
3553 if (handle->sample && wakeup_events) {
3554 int events = local_inc_return(&buffer->events);
3555 if (events >= wakeup_events) {
3556 local_sub(wakeup_events, &buffer->events);
3557 local_inc(&buffer->wakeup);
3561 perf_output_put_handle(handle);
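/*
 * Illustrative user-space sketch of the consumer half of the protocol
 * implemented by perf_output_begin()/perf_output_end() above: read
 * data_head once, order it against the record loads, then publish
 * data_tail so perf_output_space() can reuse the space.  Records that
 * wrap the ring must be copied out piecewise (not shown); a sketch,
 * assuming the mapping set up by perf_mmap() above.
 */
#if 0
static void drain(struct perf_event_mmap_page *pc, char *data, __u64 mask)
{
	__u64 head = pc->data_head;
	__u64 tail = pc->data_tail;

	__sync_synchronize();	/* read head before reading records */
	while (tail < head) {
		struct perf_event_header *hdr =
			(void *)(data + (tail & mask));
		/* ... consume hdr->size bytes ... */
		tail += hdr->size;
	}
	__sync_synchronize();	/* finish reads before releasing space */
	pc->data_tail = tail;
}
#endif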
3565 static void perf_output_read_one(struct perf_output_handle *handle,
3566 struct perf_event *event,
3567 u64 enabled, u64 running)
3569 u64 read_format = event->attr.read_format;
3573 values[n++] = perf_event_count(event);
3574 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3575 values[n++] = enabled +
3576 atomic64_read(&event->child_total_time_enabled);
3578 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3579 values[n++] = running +
3580 atomic64_read(&event->child_total_time_running);
3582 if (read_format & PERF_FORMAT_ID)
3583 values[n++] = primary_event_id(event);
3585 perf_output_copy(handle, values, n * sizeof(u64));
3589 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3591 static void perf_output_read_group(struct perf_output_handle *handle,
3592 struct perf_event *event,
3593 u64 enabled, u64 running)
3595 struct perf_event *leader = event->group_leader, *sub;
3596 u64 read_format = event->attr.read_format;
3600 values[n++] = 1 + leader->nr_siblings;
3602 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3603 values[n++] = enabled;
3605 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3606 values[n++] = running;
3608 if (leader != event)
3609 leader->pmu->read(leader);
3611 values[n++] = perf_event_count(leader);
3612 if (read_format & PERF_FORMAT_ID)
3613 values[n++] = primary_event_id(leader);
3615 perf_output_copy(handle, values, n * sizeof(u64));
3617 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3621 sub->pmu->read(sub);
3623 values[n++] = perf_event_count(sub);
3624 if (read_format & PERF_FORMAT_ID)
3625 values[n++] = primary_event_id(sub);
3627 perf_output_copy(handle, values, n * sizeof(u64));
3631 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3632 PERF_FORMAT_TOTAL_TIME_RUNNING)
3634 static void perf_output_read(struct perf_output_handle *handle,
3635 struct perf_event *event)
3637 u64 enabled = 0, running = 0, now, ctx_time;
3638 u64 read_format = event->attr.read_format;
3641 * compute total_time_enabled, total_time_running
3642 * based on snapshot values taken when the event
3643 * was last scheduled in.
3645 * we cannot simply call update_context_time()
3646 * because of locking issues, as we are called in NMI context.
3649 if (read_format & PERF_FORMAT_TOTAL_TIMES) {
3651 ctx_time = event->shadow_ctx_time + now;
3652 enabled = ctx_time - event->tstamp_enabled;
3653 running = ctx_time - event->tstamp_running;
3656 if (event->attr.read_format & PERF_FORMAT_GROUP)
3657 perf_output_read_group(handle, event, enabled, running);
3659 perf_output_read_one(handle, event, enabled, running);
3662 void perf_output_sample(struct perf_output_handle *handle,
3663 struct perf_event_header *header,
3664 struct perf_sample_data *data,
3665 struct perf_event *event)
3667 u64 sample_type = data->type;
3669 perf_output_put(handle, *header);
3671 if (sample_type & PERF_SAMPLE_IP)
3672 perf_output_put(handle, data->ip);
3674 if (sample_type & PERF_SAMPLE_TID)
3675 perf_output_put(handle, data->tid_entry);
3677 if (sample_type & PERF_SAMPLE_TIME)
3678 perf_output_put(handle, data->time);
3680 if (sample_type & PERF_SAMPLE_ADDR)
3681 perf_output_put(handle, data->addr);
3683 if (sample_type & PERF_SAMPLE_ID)
3684 perf_output_put(handle, data->id);
3686 if (sample_type & PERF_SAMPLE_STREAM_ID)
3687 perf_output_put(handle, data->stream_id);
3689 if (sample_type & PERF_SAMPLE_CPU)
3690 perf_output_put(handle, data->cpu_entry);
3692 if (sample_type & PERF_SAMPLE_PERIOD)
3693 perf_output_put(handle, data->period);
3695 if (sample_type & PERF_SAMPLE_READ)
3696 perf_output_read(handle, event);
3698 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3699 if (data->callchain) {
3702 if (data->callchain)
3703 size += data->callchain->nr;
3705 size *= sizeof(u64);
3707 perf_output_copy(handle, data->callchain, size);
3710 perf_output_put(handle, nr);
3714 if (sample_type & PERF_SAMPLE_RAW) {
3716 perf_output_put(handle, data->raw->size);
3717 perf_output_copy(handle, data->raw->data,
3724 .size = sizeof(u32),
3727 perf_output_put(handle, raw);
3732 void perf_prepare_sample(struct perf_event_header *header,
3733 struct perf_sample_data *data,
3734 struct perf_event *event,
3735 struct pt_regs *regs)
3737 u64 sample_type = event->attr.sample_type;
3739 header->type = PERF_RECORD_SAMPLE;
3740 header->size = sizeof(*header) + event->header_size;
3743 header->misc |= perf_misc_flags(regs);
3745 __perf_event_header__init_id(header, data, event);
3747 if (sample_type & PERF_SAMPLE_IP)
3748 data->ip = perf_instruction_pointer(regs);
3750 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3753 data->callchain = perf_callchain(regs);
3755 if (data->callchain)
3756 size += data->callchain->nr;
3758 header->size += size * sizeof(u64);
3761 if (sample_type & PERF_SAMPLE_RAW) {
3762 int size = sizeof(u32);
3765 size += data->raw->size;
3767 size += sizeof(u32);
3769 WARN_ON_ONCE(size & (sizeof(u64)-1));
3770 header->size += size;
3774 static void perf_event_output(struct perf_event *event, int nmi,
3775 struct perf_sample_data *data,
3776 struct pt_regs *regs)
3778 struct perf_output_handle handle;
3779 struct perf_event_header header;
3781 /* protect the callchain buffers */
3784 perf_prepare_sample(&header, data, event, regs);
3786 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3789 perf_output_sample(&handle, &header, data, event);
3791 perf_output_end(&handle);
3801 struct perf_read_event {
3802 struct perf_event_header header;
3809 perf_event_read_event(struct perf_event *event,
3810 struct task_struct *task)
3812 struct perf_output_handle handle;
3813 struct perf_sample_data sample;
3814 struct perf_read_event read_event = {
3816 .type = PERF_RECORD_READ,
3818 .size = sizeof(read_event) + event->read_size,
3820 .pid = perf_event_pid(event, task),
3821 .tid = perf_event_tid(event, task),
3825 perf_event_header__init_id(&read_event.header, &sample, event);
3826 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3830 perf_output_put(&handle, read_event);
3831 perf_output_read(&handle, event);
3832 perf_event__output_id_sample(event, &handle, &sample);
3834 perf_output_end(&handle);
3838 * task tracking -- fork/exit
3840 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
3843 struct perf_task_event {
3844 struct task_struct *task;
3845 struct perf_event_context *task_ctx;
3848 struct perf_event_header header;
3858 static void perf_event_task_output(struct perf_event *event,
3859 struct perf_task_event *task_event)
3861 struct perf_output_handle handle;
3862 struct perf_sample_data sample;
3863 struct task_struct *task = task_event->task;
3864 int ret, size = task_event->event_id.header.size;
3866 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
3868 ret = perf_output_begin(&handle, event,
3869 task_event->event_id.header.size, 0, 0);
3873 task_event->event_id.pid = perf_event_pid(event, task);
3874 task_event->event_id.ppid = perf_event_pid(event, current);
3876 task_event->event_id.tid = perf_event_tid(event, task);
3877 task_event->event_id.ptid = perf_event_tid(event, current);
3879 perf_output_put(&handle, task_event->event_id);
3881 perf_event__output_id_sample(event, &handle, &sample);
3883 perf_output_end(&handle);
3885 task_event->event_id.header.size = size;
3888 static int perf_event_task_match(struct perf_event *event)
3890 if (event->state < PERF_EVENT_STATE_INACTIVE)
3893 if (event->cpu != -1 && event->cpu != smp_processor_id())
3896 if (event->attr.comm || event->attr.mmap ||
3897 event->attr.mmap_data || event->attr.task)
3903 static void perf_event_task_ctx(struct perf_event_context *ctx,
3904 struct perf_task_event *task_event)
3906 struct perf_event *event;
3908 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3909 if (perf_event_task_match(event))
3910 perf_event_task_output(event, task_event);
3914 static void perf_event_task_event(struct perf_task_event *task_event)
3916 struct perf_cpu_context *cpuctx;
3917 struct perf_event_context *ctx;
3922 list_for_each_entry_rcu(pmu, &pmus, entry) {
3923 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
3924 perf_event_task_ctx(&cpuctx->ctx, task_event);
3926 ctx = task_event->task_ctx;
3928 ctxn = pmu->task_ctx_nr;
3931 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
3934 perf_event_task_ctx(ctx, task_event);
3936 put_cpu_ptr(pmu->pmu_cpu_context);
3941 static void perf_event_task(struct task_struct *task,
3942 struct perf_event_context *task_ctx,
3945 struct perf_task_event task_event;
3947 if (!atomic_read(&nr_comm_events) &&
3948 !atomic_read(&nr_mmap_events) &&
3949 !atomic_read(&nr_task_events))
3952 task_event = (struct perf_task_event){
3954 .task_ctx = task_ctx,
3957 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3959 .size = sizeof(task_event.event_id),
3965 .time = perf_clock(),
3969 perf_event_task_event(&task_event);
3972 void perf_event_fork(struct task_struct *task)
3974 perf_event_task(task, NULL, 1);
3981 struct perf_comm_event {
3982 struct task_struct *task;
3987 struct perf_event_header header;
3994 static void perf_event_comm_output(struct perf_event *event,
3995 struct perf_comm_event *comm_event)
3997 struct perf_output_handle handle;
3998 struct perf_sample_data sample;
3999 int size = comm_event->event_id.header.size;
4002 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4003 ret = perf_output_begin(&handle, event,
4004 comm_event->event_id.header.size, 0, 0);
4009 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4010 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4012 perf_output_put(&handle, comm_event->event_id);
4013 perf_output_copy(&handle, comm_event->comm,
4014 comm_event->comm_size);
4016 perf_event__output_id_sample(event, &handle, &sample);
4018 perf_output_end(&handle);
4020 comm_event->event_id.header.size = size;
4023 static int perf_event_comm_match(struct perf_event *event)
4025 if (event->state < PERF_EVENT_STATE_INACTIVE)
4028 if (event->cpu != -1 && event->cpu != smp_processor_id())
4031 if (event->attr.comm)
4037 static void perf_event_comm_ctx(struct perf_event_context *ctx,
4038 struct perf_comm_event *comm_event)
4040 struct perf_event *event;
4042 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4043 if (perf_event_comm_match(event))
4044 perf_event_comm_output(event, comm_event);
4048 static void perf_event_comm_event(struct perf_comm_event *comm_event)
4050 struct perf_cpu_context *cpuctx;
4051 struct perf_event_context *ctx;
4052 char comm[TASK_COMM_LEN];
4057 memset(comm, 0, sizeof(comm));
4058 strlcpy(comm, comm_event->task->comm, sizeof(comm));
4059 size = ALIGN(strlen(comm)+1, sizeof(u64));
4061 comm_event->comm = comm;
4062 comm_event->comm_size = size;
4064 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4066 list_for_each_entry_rcu(pmu, &pmus, entry) {
4067 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4068 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4070 ctxn = pmu->task_ctx_nr;
4074 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4076 perf_event_comm_ctx(ctx, comm_event);
4078 put_cpu_ptr(pmu->pmu_cpu_context);
4083 void perf_event_comm(struct task_struct *task)
4085 struct perf_comm_event comm_event;
4086 struct perf_event_context *ctx;
4089 for_each_task_context_nr(ctxn) {
4090 ctx = task->perf_event_ctxp[ctxn];
4094 perf_event_enable_on_exec(ctx);
4097 if (!atomic_read(&nr_comm_events))
4100 comm_event = (struct perf_comm_event){
4106 .type = PERF_RECORD_COMM,
4115 perf_event_comm_event(&comm_event);
4122 struct perf_mmap_event {
4123 struct vm_area_struct *vma;
4125 const char *file_name;
4129 struct perf_event_header header;
4139 static void perf_event_mmap_output(struct perf_event *event,
4140 struct perf_mmap_event *mmap_event)
4142 struct perf_output_handle handle;
4143 struct perf_sample_data sample;
4144 int size = mmap_event->event_id.header.size;
4147 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4148 ret = perf_output_begin(&handle, event,
4149 mmap_event->event_id.header.size, 0, 0);
4153 mmap_event->event_id.pid = perf_event_pid(event, current);
4154 mmap_event->event_id.tid = perf_event_tid(event, current);
4156 perf_output_put(&handle, mmap_event->event_id);
4157 perf_output_copy(&handle, mmap_event->file_name,
4158 mmap_event->file_size);
4160 perf_event__output_id_sample(event, &handle, &sample);
4162 perf_output_end(&handle);
4164 mmap_event->event_id.header.size = size;
4167 static int perf_event_mmap_match(struct perf_event *event,
4168 struct perf_mmap_event *mmap_event,
4171 if (event->state < PERF_EVENT_STATE_INACTIVE)
4174 if (event->cpu != -1 && event->cpu != smp_processor_id())
4177 if ((!executable && event->attr.mmap_data) ||
4178 (executable && event->attr.mmap))
4184 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4185 struct perf_mmap_event *mmap_event,
4188 struct perf_event *event;
4190 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4191 if (perf_event_mmap_match(event, mmap_event, executable))
4192 perf_event_mmap_output(event, mmap_event);
4196 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4198 struct perf_cpu_context *cpuctx;
4199 struct perf_event_context *ctx;
4200 struct vm_area_struct *vma = mmap_event->vma;
4201 struct file *file = vma->vm_file;
4209 memset(tmp, 0, sizeof(tmp));
4213 * d_path works from the end of the buffer backwards, so we
4214 * need to add enough zero bytes after the string to handle
4215 * the 64bit alignment we do later.
4217 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4219 name = strncpy(tmp, "//enomem", sizeof(tmp));
4222 name = d_path(&file->f_path, buf, PATH_MAX);
4224 name = strncpy(tmp, "//toolong", sizeof(tmp));
4228 if (arch_vma_name(mmap_event->vma)) {
4229 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4235 name = strncpy(tmp, "[vdso]", sizeof(tmp));
4237 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4238 vma->vm_end >= vma->vm_mm->brk) {
4239 name = strncpy(tmp, "[heap]", sizeof(tmp));
4241 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4242 vma->vm_end >= vma->vm_mm->start_stack) {
4243 name = strncpy(tmp, "[stack]", sizeof(tmp));
4247 name = strncpy(tmp, "//anon", sizeof(tmp));
4252 size = ALIGN(strlen(name)+1, sizeof(u64));
4254 mmap_event->file_name = name;
4255 mmap_event->file_size = size;
4257 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4260 list_for_each_entry_rcu(pmu, &pmus, entry) {
4261 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4262 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4263 vma->vm_flags & VM_EXEC);
4265 ctxn = pmu->task_ctx_nr;
4269 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4271 perf_event_mmap_ctx(ctx, mmap_event,
4272 vma->vm_flags & VM_EXEC);
4275 put_cpu_ptr(pmu->pmu_cpu_context);
4282 void perf_event_mmap(struct vm_area_struct *vma)
4284 struct perf_mmap_event mmap_event;
4286 if (!atomic_read(&nr_mmap_events))
4289 mmap_event = (struct perf_mmap_event){
4295 .type = PERF_RECORD_MMAP,
4296 .misc = PERF_RECORD_MISC_USER,
4301 .start = vma->vm_start,
4302 .len = vma->vm_end - vma->vm_start,
4303 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
4307 perf_event_mmap_event(&mmap_event);
4311 * IRQ throttle logging
4314 static void perf_log_throttle(struct perf_event *event, int enable)
4316 struct perf_output_handle handle;
4317 struct perf_sample_data sample;
4321 struct perf_event_header header;
4325 } throttle_event = {
4327 .type = PERF_RECORD_THROTTLE,
4329 .size = sizeof(throttle_event),
4331 .time = perf_clock(),
4332 .id = primary_event_id(event),
4333 .stream_id = event->id,
4337 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4339 perf_event_header__init_id(&throttle_event.header, &sample, event);
4341 ret = perf_output_begin(&handle, event,
4342 throttle_event.header.size, 1, 0);
4346 perf_output_put(&handle, throttle_event);
4347 perf_event__output_id_sample(event, &handle, &sample);
4348 perf_output_end(&handle);
4352 * Generic event overflow handling, sampling.
4355 static int __perf_event_overflow(struct perf_event *event, int nmi,
4356 int throttle, struct perf_sample_data *data,
4357 struct pt_regs *regs)
4359 int events = atomic_read(&event->event_limit);
4360 struct hw_perf_event *hwc = &event->hw;
4364 * Non-sampling counters might still use the PMI to fold short
4365 * hardware counters, ignore those.
4367 if (unlikely(!is_sampling_event(event)))
4373 if (hwc->interrupts != MAX_INTERRUPTS) {
4375 if (HZ * hwc->interrupts >
4376 (u64)sysctl_perf_event_sample_rate) {
4377 hwc->interrupts = MAX_INTERRUPTS;
4378 perf_log_throttle(event, 0);
4383 * Keep re-disabling events even though on the previous
4384 * pass we disabled it - just in case we raced with a
4385 * sched-in and the event got enabled again:
4391 if (event->attr.freq) {
4392 u64 now = perf_clock();
4393 s64 delta = now - hwc->freq_time_stamp;
4395 hwc->freq_time_stamp = now;
4397 if (delta > 0 && delta < 2*TICK_NSEC)
4398 perf_adjust_period(event, delta, hwc->last_period);
4402 * XXX event_limit might not quite work as expected on inherited events.
4406 event->pending_kill = POLL_IN;
4407 if (events && atomic_dec_and_test(&event->event_limit)) {
4409 event->pending_kill = POLL_HUP;
4411 event->pending_disable = 1;
4412 irq_work_queue(&event->pending);
4414 perf_event_disable(event);
4417 if (event->overflow_handler)
4418 event->overflow_handler(event, nmi, data, regs);
4420 perf_event_output(event, nmi, data, regs);
4425 int perf_event_overflow(struct perf_event *event, int nmi,
4426 struct perf_sample_data *data,
4427 struct pt_regs *regs)
4429 return __perf_event_overflow(event, nmi, 1, data, regs);
4433 * Generic software event infrastructure
4436 struct swevent_htable {
4437 struct swevent_hlist *swevent_hlist;
4438 struct mutex hlist_mutex;
4441 /* Recursion avoidance in each contexts */
4442 int recursion[PERF_NR_CONTEXTS];
4445 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4448 * We directly increment event->count and keep a second value in
4449 * event->hw.period_left to count intervals. This period event
4450 * is kept in the range [-sample_period, 0] so that we can use the sign as trigger.
4454 static u64 perf_swevent_set_period(struct perf_event *event)
4456 struct hw_perf_event *hwc = &event->hw;
4457 u64 period = hwc->last_period;
4461 hwc->last_period = hwc->sample_period;
4464 old = val = local64_read(&hwc->period_left);
4468 nr = div64_u64(period + val, period);
4469 offset = nr * period;
4470 val -= offset;
4471 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
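/*
 * Worked example of the math above: with period = 100 and
 * period_left having climbed to val = 250, nr = (100 + 250) / 100 = 3
 * overflows are reported, offset = 300, and period_left is rewound to
 * 250 - 300 = -50, i.e. 50 more events until the next trigger --
 * keeping it within [-sample_period, 0] as described above.
 */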
4477 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4478 int nmi, struct perf_sample_data *data,
4479 struct pt_regs *regs)
4481 struct hw_perf_event *hwc = &event->hw;
4484 data->period = event->hw.last_period;
4486 overflow = perf_swevent_set_period(event);
4488 if (hwc->interrupts == MAX_INTERRUPTS)
4491 for (; overflow; overflow--) {
4492 if (__perf_event_overflow(event, nmi, throttle,
4495 * We inhibit the overflow from happening when
4496 * hwc->interrupts == MAX_INTERRUPTS.
4504 static void perf_swevent_event(struct perf_event *event, u64 nr,
4505 int nmi, struct perf_sample_data *data,
4506 struct pt_regs *regs)
4508 struct hw_perf_event *hwc = &event->hw;
4510 local64_add(nr, &event->count);
4515 if (!is_sampling_event(event))
4518 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4519 return perf_swevent_overflow(event, 1, nmi, data, regs);
4521 if (local64_add_negative(nr, &hwc->period_left))
4524 perf_swevent_overflow(event, 0, nmi, data, regs);
4527 static int perf_exclude_event(struct perf_event *event,
4528 struct pt_regs *regs)
4530 if (event->hw.state & PERF_HES_STOPPED)
4534 if (event->attr.exclude_user && user_mode(regs))
4537 if (event->attr.exclude_kernel && !user_mode(regs))
4544 static int perf_swevent_match(struct perf_event *event,
4545 enum perf_type_id type,
4547 struct perf_sample_data *data,
4548 struct pt_regs *regs)
4550 if (event->attr.type != type)
4553 if (event->attr.config != event_id)
4556 if (perf_exclude_event(event, regs))
4562 static inline u64 swevent_hash(u64 type, u32 event_id)
4564 u64 val = event_id | (type << 32);
4566 return hash_64(val, SWEVENT_HLIST_BITS);
4569 static inline struct hlist_head *
4570 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4572 u64 hash = swevent_hash(type, event_id);
4574 return &hlist->heads[hash];
4577 /* For the read side: events when they trigger */
4578 static inline struct hlist_head *
4579 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
4581 struct swevent_hlist *hlist;
4583 hlist = rcu_dereference(swhash->swevent_hlist);
4587 return __find_swevent_head(hlist, type, event_id);
4590 /* For the event head insertion and removal in the hlist */
4591 static inline struct hlist_head *
4592 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
4594 struct swevent_hlist *hlist;
4595 u32 event_id = event->attr.config;
4596 u64 type = event->attr.type;
4599 * Event scheduling is always serialized against hlist allocation
4600 * and release, which makes the protected version suitable here.
4601 * The context lock guarantees that.
4603 hlist = rcu_dereference_protected(swhash->swevent_hlist,
4604 lockdep_is_held(&event->ctx->lock));
4608 return __find_swevent_head(hlist, type, event_id);
4611 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4613 struct perf_sample_data *data,
4614 struct pt_regs *regs)
4616 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4617 struct perf_event *event;
4618 struct hlist_node *node;
4619 struct hlist_head *head;
4622 head = find_swevent_head_rcu(swhash, type, event_id);
4626 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4627 if (perf_swevent_match(event, type, event_id, data, regs))
4628 perf_swevent_event(event, nr, nmi, data, regs);
4634 int perf_swevent_get_recursion_context(void)
4636 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4638 return get_recursion_context(swhash->recursion);
4640 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4642 inline void perf_swevent_put_recursion_context(int rctx)
4644 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4646 put_recursion_context(swhash->recursion, rctx);
4649 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4650 struct pt_regs *regs, u64 addr)
4652 struct perf_sample_data data;
4655 preempt_disable_notrace();
4656 rctx = perf_swevent_get_recursion_context();
4660 perf_sample_data_init(&data, addr);
4662 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4664 perf_swevent_put_recursion_context(rctx);
4665 preempt_enable_notrace();
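/*
 * Typical call-site sketch: the perf_sw_event() wrapper in
 * <linux/perf_event.h> funnels into __perf_sw_event() above.  The
 * context-switch hook, for instance, does roughly (illustrative):
 */
#if 0
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
#endif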
4668 static void perf_swevent_read(struct perf_event *event)
4672 static int perf_swevent_add(struct perf_event *event, int flags)
4674 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4675 struct hw_perf_event *hwc = &event->hw;
4676 struct hlist_head *head;
4678 if (is_sampling_event(event)) {
4679 hwc->last_period = hwc->sample_period;
4680 perf_swevent_set_period(event);
4683 hwc->state = !(flags & PERF_EF_START);
4685 head = find_swevent_head(swhash, event);
4686 if (WARN_ON_ONCE(!head))
4689 hlist_add_head_rcu(&event->hlist_entry, head);
4694 static void perf_swevent_del(struct perf_event *event, int flags)
4696 hlist_del_rcu(&event->hlist_entry);
4699 static void perf_swevent_start(struct perf_event *event, int flags)
4701 event->hw.state = 0;
4704 static void perf_swevent_stop(struct perf_event *event, int flags)
4706 event->hw.state = PERF_HES_STOPPED;
4709 /* Deref the hlist from the update side */
4710 static inline struct swevent_hlist *
4711 swevent_hlist_deref(struct swevent_htable *swhash)
4713 return rcu_dereference_protected(swhash->swevent_hlist,
4714 lockdep_is_held(&swhash->hlist_mutex));
4717 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4719 struct swevent_hlist *hlist;
4721 hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
4725 static void swevent_hlist_release(struct swevent_htable *swhash)
4727 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
4732 rcu_assign_pointer(swhash->swevent_hlist, NULL);
4733 call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4736 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4738 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
4740 mutex_lock(&swhash->hlist_mutex);
4742 if (!--swhash->hlist_refcount)
4743 swevent_hlist_release(swhash);
4745 mutex_unlock(&swhash->hlist_mutex);
4748 static void swevent_hlist_put(struct perf_event *event)
4752 if (event->cpu != -1) {
4753 swevent_hlist_put_cpu(event, event->cpu);
4757 for_each_possible_cpu(cpu)
4758 swevent_hlist_put_cpu(event, cpu);
4761 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4763 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
4766 mutex_lock(&swhash->hlist_mutex);
4768 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
4769 struct swevent_hlist *hlist;
4771 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4776 rcu_assign_pointer(swhash->swevent_hlist, hlist);
4778 swhash->hlist_refcount++;
4780 mutex_unlock(&swhash->hlist_mutex);
4785 static int swevent_hlist_get(struct perf_event *event)
4788 int cpu, failed_cpu;
4790 if (event->cpu != -1)
4791 return swevent_hlist_get_cpu(event, event->cpu);
4794 for_each_possible_cpu(cpu) {
4795 err = swevent_hlist_get_cpu(event, cpu);
4805 for_each_possible_cpu(cpu) {
4806 if (cpu == failed_cpu)
4808 swevent_hlist_put_cpu(event, cpu);
4815 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4817 static void sw_perf_event_destroy(struct perf_event *event)
4819 u64 event_id = event->attr.config;
4821 WARN_ON(event->parent);
4823 jump_label_dec(&perf_swevent_enabled[event_id]);
4824 swevent_hlist_put(event);
4827 static int perf_swevent_init(struct perf_event *event)
4829 int event_id = event->attr.config;
4831 if (event->attr.type != PERF_TYPE_SOFTWARE)
4835 case PERF_COUNT_SW_CPU_CLOCK:
4836 case PERF_COUNT_SW_TASK_CLOCK:
4843 if (event_id > PERF_COUNT_SW_MAX)
4846 if (!event->parent) {
4849 err = swevent_hlist_get(event);
4853 jump_label_inc(&perf_swevent_enabled[event_id]);
4854 event->destroy = sw_perf_event_destroy;
4860 static struct pmu perf_swevent = {
4861 .task_ctx_nr = perf_sw_context,
4863 .event_init = perf_swevent_init,
4864 .add = perf_swevent_add,
4865 .del = perf_swevent_del,
4866 .start = perf_swevent_start,
4867 .stop = perf_swevent_stop,
4868 .read = perf_swevent_read,
4871 #ifdef CONFIG_EVENT_TRACING
4873 static int perf_tp_filter_match(struct perf_event *event,
4874 struct perf_sample_data *data)
4876 void *record = data->raw->data;
4878 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4883 static int perf_tp_event_match(struct perf_event *event,
4884 struct perf_sample_data *data,
4885 struct pt_regs *regs)
4888 * All tracepoints are from kernel-space.
4890 if (event->attr.exclude_kernel)
4893 if (!perf_tp_filter_match(event, data))
4899 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
4900 struct pt_regs *regs, struct hlist_head *head, int rctx)
4902 struct perf_sample_data data;
4903 struct perf_event *event;
4904 struct hlist_node *node;
4906 struct perf_raw_record raw = {
4911 perf_sample_data_init(&data, addr);
4914 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4915 if (perf_tp_event_match(event, &data, regs))
4916 perf_swevent_event(event, count, 1, &data, regs);
4919 perf_swevent_put_recursion_context(rctx);
4921 EXPORT_SYMBOL_GPL(perf_tp_event);
4923 static void tp_perf_event_destroy(struct perf_event *event)
4925 perf_trace_destroy(event);
4928 static int perf_tp_event_init(struct perf_event *event)
4932 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4935 err = perf_trace_init(event);
4939 event->destroy = tp_perf_event_destroy;
4944 static struct pmu perf_tracepoint = {
4945 .task_ctx_nr = perf_sw_context,
4947 .event_init = perf_tp_event_init,
4948 .add = perf_trace_add,
4949 .del = perf_trace_del,
4950 .start = perf_swevent_start,
4951 .stop = perf_swevent_stop,
4952 .read = perf_swevent_read,
4955 static inline void perf_tp_register(void)
4957 perf_pmu_register(&perf_tracepoint);
4960 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4965 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4968 filter_str = strndup_user(arg, PAGE_SIZE);
4969 if (IS_ERR(filter_str))
4970 return PTR_ERR(filter_str);
4972 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4978 static void perf_event_free_filter(struct perf_event *event)
4980 ftrace_profile_free_filter(event);
4983 #else
4985 static inline void perf_tp_register(void)
4989 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4994 static void perf_event_free_filter(struct perf_event *event)
4998 #endif /* CONFIG_EVENT_TRACING */
5000 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5001 void perf_bp_event(struct perf_event *bp, void *data)
5003 struct perf_sample_data sample;
5004 struct pt_regs *regs = data;
5006 perf_sample_data_init(&sample, bp->attr.bp_addr);
5008 if (!bp->hw.state && !perf_exclude_event(bp, regs))
5009 perf_swevent_event(bp, 1, 1, &sample, regs);
5011 #endif
5014 * hrtimer based swevent callback
5017 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5019 enum hrtimer_restart ret = HRTIMER_RESTART;
5020 struct perf_sample_data data;
5021 struct pt_regs *regs;
5022 struct perf_event *event;
5025 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5026 event->pmu->read(event);
5028 perf_sample_data_init(&data, 0);
5029 data.period = event->hw.last_period;
5030 regs = get_irq_regs();
5032 if (regs && !perf_exclude_event(event, regs)) {
5033 if (!(event->attr.exclude_idle && current->pid == 0))
5034 if (perf_event_overflow(event, 0, &data, regs))
5035 ret = HRTIMER_NORESTART;
5038 period = max_t(u64, 10000, event->hw.sample_period);
5039 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5044 static void perf_swevent_start_hrtimer(struct perf_event *event)
5046 struct hw_perf_event *hwc = &event->hw;
5049 if (!is_sampling_event(event))
5052 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5053 hwc->hrtimer.function = perf_swevent_hrtimer;
5055 period = local64_read(&hwc->period_left);
5060 local64_set(&hwc->period_left, 0);
5062 period = max_t(u64, 10000, hwc->sample_period);
5064 __hrtimer_start_range_ns(&hwc->hrtimer,
5065 ns_to_ktime(period), 0,
5066 HRTIMER_MODE_REL_PINNED, 0);
5069 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5071 struct hw_perf_event *hwc = &event->hw;
5073 if (is_sampling_event(event)) {
5074 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
5075 local64_set(&hwc->period_left, ktime_to_ns(remaining));
5077 hrtimer_cancel(&hwc->hrtimer);
5082 * Software event: cpu wall time clock
5085 static void cpu_clock_event_update(struct perf_event *event)
5090 now = local_clock();
5091 prev = local64_xchg(&event->hw.prev_count, now);
5092 local64_add(now - prev, &event->count);
5095 static void cpu_clock_event_start(struct perf_event *event, int flags)
5097 local64_set(&event->hw.prev_count, local_clock());
5098 perf_swevent_start_hrtimer(event);
5101 static void cpu_clock_event_stop(struct perf_event *event, int flags)
5103 perf_swevent_cancel_hrtimer(event);
5104 cpu_clock_event_update(event);
5107 static int cpu_clock_event_add(struct perf_event *event, int flags)
5109 if (flags & PERF_EF_START)
5110 cpu_clock_event_start(event, flags);
5115 static void cpu_clock_event_del(struct perf_event *event, int flags)
5117 cpu_clock_event_stop(event, flags);
5120 static void cpu_clock_event_read(struct perf_event *event)
5122 cpu_clock_event_update(event);
5125 static int cpu_clock_event_init(struct perf_event *event)
5127 if (event->attr.type != PERF_TYPE_SOFTWARE)
5130 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5136 static struct pmu perf_cpu_clock = {
5137 .task_ctx_nr = perf_sw_context,
5139 .event_init = cpu_clock_event_init,
5140 .add = cpu_clock_event_add,
5141 .del = cpu_clock_event_del,
5142 .start = cpu_clock_event_start,
5143 .stop = cpu_clock_event_stop,
5144 .read = cpu_clock_event_read,
5148 * Software event: task time clock
5151 static void task_clock_event_update(struct perf_event *event, u64 now)
5156 prev = local64_xchg(&event->hw.prev_count, now);
5157 delta = now - prev;
5158 local64_add(delta, &event->count);
5161 static void task_clock_event_start(struct perf_event *event, int flags)
5163 local64_set(&event->hw.prev_count, event->ctx->time);
5164 perf_swevent_start_hrtimer(event);
5167 static void task_clock_event_stop(struct perf_event *event, int flags)
5169 perf_swevent_cancel_hrtimer(event);
5170 task_clock_event_update(event, event->ctx->time);
5173 static int task_clock_event_add(struct perf_event *event, int flags)
5175 if (flags & PERF_EF_START)
5176 task_clock_event_start(event, flags);
5181 static void task_clock_event_del(struct perf_event *event, int flags)
5183 task_clock_event_stop(event, PERF_EF_UPDATE);
5186 static void task_clock_event_read(struct perf_event *event)
5191 update_context_time(event->ctx);
5192 time = event->ctx->time;
5194 u64 now = perf_clock();
5195 u64 delta = now - event->ctx->timestamp;
5196 time = event->ctx->time + delta;
5199 task_clock_event_update(event, time);
5202 static int task_clock_event_init(struct perf_event *event)
5204 if (event->attr.type != PERF_TYPE_SOFTWARE)
5207 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5213 static struct pmu perf_task_clock = {
5214 .task_ctx_nr = perf_sw_context,
5216 .event_init = task_clock_event_init,
5217 .add = task_clock_event_add,
5218 .del = task_clock_event_del,
5219 .start = task_clock_event_start,
5220 .stop = task_clock_event_stop,
5221 .read = task_clock_event_read,
5224 static void perf_pmu_nop_void(struct pmu *pmu)
5228 static int perf_pmu_nop_int(struct pmu *pmu)
5233 static void perf_pmu_start_txn(struct pmu *pmu)
5235 perf_pmu_disable(pmu);
5238 static int perf_pmu_commit_txn(struct pmu *pmu)
5240 perf_pmu_enable(pmu);
5244 static void perf_pmu_cancel_txn(struct pmu *pmu)
5246 perf_pmu_enable(pmu);
5250 * Ensures all contexts with the same task_ctx_nr have the same
5251 * pmu_cpu_context too.
5253 static void *find_pmu_context(int ctxn)
5260 list_for_each_entry(pmu, &pmus, entry) {
5261 if (pmu->task_ctx_nr == ctxn)
5262 return pmu->pmu_cpu_context;
5268 static void free_pmu_context(void * __percpu cpu_context)
5272 mutex_lock(&pmus_lock);
5274 * Like a real lame refcount.
5276 list_for_each_entry(pmu, &pmus, entry) {
5277 if (pmu->pmu_cpu_context == cpu_context)
5278 goto out;
5281 free_percpu(cpu_context);
5282 out:
5283 mutex_unlock(&pmus_lock);
5286 int perf_pmu_register(struct pmu *pmu)
5290 mutex_lock(&pmus_lock);
5292 pmu->pmu_disable_count = alloc_percpu(int);
5293 if (!pmu->pmu_disable_count)
5296 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5297 if (pmu->pmu_cpu_context)
5298 goto got_cpu_context;
5300 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5301 if (!pmu->pmu_cpu_context)
5304 for_each_possible_cpu(cpu) {
5305 struct perf_cpu_context *cpuctx;
5307 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5308 __perf_event_init_context(&cpuctx->ctx);
5309 cpuctx->ctx.type = cpu_context;
5310 cpuctx->ctx.pmu = pmu;
5311 cpuctx->jiffies_interval = 1;
5312 INIT_LIST_HEAD(&cpuctx->rotation_list);
5316 if (!pmu->start_txn) {
5317 if (pmu->pmu_enable) {
5319 * If we have pmu_enable/pmu_disable calls, install
5320 * transaction stubs that use them to try to batch
5321 * hardware accesses.
5323 pmu->start_txn = perf_pmu_start_txn;
5324 pmu->commit_txn = perf_pmu_commit_txn;
5325 pmu->cancel_txn = perf_pmu_cancel_txn;
5327 pmu->start_txn = perf_pmu_nop_void;
5328 pmu->commit_txn = perf_pmu_nop_int;
5329 pmu->cancel_txn = perf_pmu_nop_void;
5333 if (!pmu->pmu_enable) {
5334 pmu->pmu_enable = perf_pmu_nop_void;
5335 pmu->pmu_disable = perf_pmu_nop_void;
5338 list_add_rcu(&pmu->entry, &pmus);
5341 mutex_unlock(&pmus_lock);
5346 free_percpu(pmu->pmu_disable_count);
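/*
 * Registration sketch for a hypothetical pmu (the built-in software pmus
 * in this file are registered this way from perf_event_init() below;
 * every my_-prefixed name here is made up):
 */
static int my_pmu_event_init(struct perf_event *event)
{
	return -ENOENT;		/* claim no events; fuller sketch below */
}

static int  my_pmu_add(struct perf_event *event, int flags)	{ return 0; }
static void my_pmu_del(struct perf_event *event, int flags)	{ }
static void my_pmu_start(struct perf_event *event, int flags)	{ }
static void my_pmu_stop(struct perf_event *event, int flags)	{ }
static void my_pmu_read(struct perf_event *event)		{ }

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_invalid_context,	/* per-cpu events only */
	.event_init	= my_pmu_event_init,
	.add		= my_pmu_add,
	.del		= my_pmu_del,
	.start		= my_pmu_start,
	.stop		= my_pmu_stop,
	.read		= my_pmu_read,
	/*
	 * pmu_enable/pmu_disable and the txn trio are optional;
	 * perf_pmu_register() fills in the nop stubs above.
	 */
};

/* somewhere at init time: perf_pmu_register(&my_pmu); */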
5350 void perf_pmu_unregister(struct pmu *pmu)
5352 mutex_lock(&pmus_lock);
5353 list_del_rcu(&pmu->entry);
5354 mutex_unlock(&pmus_lock);
5357 * We dereference the pmu list under both SRCU and regular RCU, so
5358 * synchronize against both of those.
5360 synchronize_srcu(&pmus_srcu);
5363 free_percpu(pmu->pmu_disable_count);
5364 free_pmu_context(pmu->pmu_cpu_context);
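/*
 * Pairs with perf_pmu_register() above; e.g. for the hypothetical pmu
 * from the registration sketch:
 *
 *	perf_pmu_unregister(&my_pmu);
 *
 * Per the comment above, the SRCU and RCU synchronization guarantees no
 * reader still sees the pmu once this returns.
 */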
5367 struct pmu *perf_init_event(struct perf_event *event)
5369 struct pmu *pmu = NULL;
5372 idx = srcu_read_lock(&pmus_srcu);
5373 list_for_each_entry_rcu(pmu, &pmus, entry) {
5374 int ret = pmu->event_init(event);
5378 if (ret != -ENOENT) {
5379 pmu = ERR_PTR(ret);
5380 goto unlock;
5381 }
5383 pmu = ERR_PTR(-ENOENT);
5385 srcu_read_unlock(&pmus_srcu, idx);
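/*
 * The convention relied on above: event_init() returns -ENOENT for
 * "not my event" so the walk moves on to the next pmu, and any other
 * error aborts the lookup. A fuller version of the hypothetical
 * my_pmu_event_init() from the registration sketch:
 */
static int __maybe_unused my_pmu_event_init_full(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_RAW)
		return -ENOENT;			/* some other pmu's event */

	if (event->attr.config & ~0xffULL)	/* made-up config space */
		return -EINVAL;			/* ours, but invalid: stop */

	return 0;				/* claimed */
}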
5391 * Allocate and initialize an event structure
5393 static struct perf_event *
5394 perf_event_alloc(struct perf_event_attr *attr, int cpu,
5395 struct task_struct *task,
5396 struct perf_event *group_leader,
5397 struct perf_event *parent_event,
5398 perf_overflow_handler_t overflow_handler)
5401 struct perf_event *event;
5402 struct hw_perf_event *hwc;
5405 event = kzalloc(sizeof(*event), GFP_KERNEL);
5407 return ERR_PTR(-ENOMEM);
5410 * Single events are their own group leaders, with an
5411 * empty sibling list:
5414 group_leader = event;
5416 mutex_init(&event->child_mutex);
5417 INIT_LIST_HEAD(&event->child_list);
5419 INIT_LIST_HEAD(&event->group_entry);
5420 INIT_LIST_HEAD(&event->event_entry);
5421 INIT_LIST_HEAD(&event->sibling_list);
5422 init_waitqueue_head(&event->waitq);
5423 init_irq_work(&event->pending, perf_pending_event);
5425 mutex_init(&event->mmap_mutex);
5428 event->attr = *attr;
5429 event->group_leader = group_leader;
5433 event->parent = parent_event;
5435 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5436 event->id = atomic64_inc_return(&perf_event_id);
5438 event->state = PERF_EVENT_STATE_INACTIVE;
5441 event->attach_state = PERF_ATTACH_TASK;
5442 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5444 * hw_breakpoint is a bit special here: it must know its target task up front.
5446 if (attr->type == PERF_TYPE_BREAKPOINT)
5447 event->hw.bp_target = task;
5451 if (!overflow_handler && parent_event)
5452 overflow_handler = parent_event->overflow_handler;
5454 event->overflow_handler = overflow_handler;
5457 event->state = PERF_EVENT_STATE_OFF;
5462 hwc->sample_period = attr->sample_period;
5463 if (attr->freq && attr->sample_freq)
5464 hwc->sample_period = 1;
5465 hwc->last_period = hwc->sample_period;
5467 local64_set(&hwc->period_left, hwc->sample_period);
5470 * we currently do not support PERF_FORMAT_GROUP on inherited events
5472 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5475 pmu = perf_init_event(event);
5481 else if (IS_ERR(pmu))
5486 put_pid_ns(event->ns);
5488 return ERR_PTR(err);
5493 if (!event->parent) {
5494 if (event->attach_state & PERF_ATTACH_TASK)
5495 jump_label_inc(&perf_task_events);
5496 if (event->attr.mmap || event->attr.mmap_data)
5497 atomic_inc(&nr_mmap_events);
5498 if (event->attr.comm)
5499 atomic_inc(&nr_comm_events);
5500 if (event->attr.task)
5501 atomic_inc(&nr_task_events);
5502 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5503 err = get_callchain_buffers();
5504 if (err) {
5505 free_event(event);
5506 return ERR_PTR(err);
5514 static int perf_copy_attr(struct perf_event_attr __user *uattr,
5515 struct perf_event_attr *attr)
5520 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5524 * Zero the full structure first, so that a short copy from user space leaves the tail zeroed.
5526 memset(attr, 0, sizeof(*attr));
5528 ret = get_user(size, &uattr->size);
5532 if (size > PAGE_SIZE) /* silly large */
5535 if (!size) /* abi compat */
5536 size = PERF_ATTR_SIZE_VER0;
5538 if (size < PERF_ATTR_SIZE_VER0)
5542 * If we're handed a bigger struct than we know of,
5543 * ensure all the unknown bits are 0 - i.e. new
5544 * user-space does not rely on any kernel feature
5545 * extensions we don't know about yet.
5547 if (size > sizeof(*attr)) {
5548 unsigned char __user *addr;
5549 unsigned char __user *end;
5552 addr = (void __user *)uattr + sizeof(*attr);
5553 end = (void __user *)uattr + size;
5555 for (; addr < end; addr++) {
5556 ret = get_user(val, addr);
5562 size = sizeof(*attr);
5565 ret = copy_from_user(attr, uattr, size);
5570 * If the type exists, the corresponding creation will verify
5571 * the attr->config.
5573 if (attr->type >= PERF_TYPE_MAX)
5576 if (attr->__reserved_1)
5579 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5582 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5589 put_user(sizeof(*attr), &uattr->size);
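/*
 * Userspace side of the size handshake validated above (sketch): report
 * the attr size you were compiled against; a shorter attr is
 * zero-extended, a longer one is accepted only if the tail the kernel
 * doesn't know about is all zeroes (and on E2BIG the kernel writes its
 * own size back). The helper name is hypothetical:
 */
static inline void sketch_fill_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size   = sizeof(*attr);	/* which ABI revision we speak */
	attr->type   = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_TASK_CLOCK;
}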
5595 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
5597 struct perf_buffer *buffer = NULL, *old_buffer = NULL;
5603 /* don't allow circular references */
5604 if (event == output_event)
5608 * Don't allow cross-cpu buffers
5610 if (output_event->cpu != event->cpu)
5614 * If it's not a per-cpu buffer, it must be the same task.
5616 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5620 mutex_lock(&event->mmap_mutex);
5621 /* Can't redirect output if we've got an active mmap() */
5622 if (atomic_read(&event->mmap_count))
5626 /* get the buffer we want to redirect to */
5627 buffer = perf_buffer_get(output_event);
5632 old_buffer = event->buffer;
5633 rcu_assign_pointer(event->buffer, buffer);
5636 mutex_unlock(&event->mmap_mutex);
5639 perf_buffer_put(old_buffer);
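/*
 * Reached either via PERF_FLAG_FD_OUTPUT at open time (see
 * sys_perf_event_open() below) or via the PERF_EVENT_IOC_SET_OUTPUT
 * ioctl, e.g. from userspace (sketch):
 *
 *	ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);
 *
 * after which event B's records land in event A's mmap'ed buffer, so a
 * single ring buffer carries both streams.
 */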
5645 * sys_perf_event_open - open a performance event, associate it to a task/cpu
5647 * @attr_uptr: event_id type attributes for monitoring/sampling
5648 * @pid: target pid
5649 * @cpu: target cpu
5650 * @group_fd: group leader event fd
5652 SYSCALL_DEFINE5(perf_event_open,
5653 struct perf_event_attr __user *, attr_uptr,
5654 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5656 struct perf_event *group_leader = NULL, *output_event = NULL;
5657 struct perf_event *event, *sibling;
5658 struct perf_event_attr attr;
5659 struct perf_event_context *ctx;
5660 struct file *event_file = NULL;
5661 struct file *group_file = NULL;
5662 struct task_struct *task = NULL;
5666 int fput_needed = 0;
5669 /* for future expandability... */
5670 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
5673 err = perf_copy_attr(attr_uptr, &attr);
5677 if (!attr.exclude_kernel) {
5678 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
5683 if (attr.sample_freq > sysctl_perf_event_sample_rate)
5687 event_fd = get_unused_fd_flags(O_RDWR);
5691 if (group_fd != -1) {
5692 group_leader = perf_fget_light(group_fd, &fput_needed);
5693 if (IS_ERR(group_leader)) {
5694 err = PTR_ERR(group_leader);
5697 group_file = group_leader->filp;
5698 if (flags & PERF_FLAG_FD_OUTPUT)
5699 output_event = group_leader;
5700 if (flags & PERF_FLAG_FD_NO_GROUP)
5701 group_leader = NULL;
5705 task = find_lively_task_by_vpid(pid);
5707 err = PTR_ERR(task);
5712 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
5713 if (IS_ERR(event)) {
5714 err = PTR_ERR(event);
5719 * Special case software events and allow them to be part of
5720 * any hardware group.
5725 (is_software_event(event) != is_software_event(group_leader))) {
5726 if (is_software_event(event)) {
5728 * If event and group_leader are not both a software
5729 * event, and event is, then group leader is not.
5731 * Allow the addition of software events to !software
5732 * groups, this is safe because software events never
5733 * fail to schedule.
5735 pmu = group_leader->pmu;
5736 } else if (is_software_event(group_leader) &&
5737 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
5739 * In case the group is a pure software group, and we
5740 * try to add a hardware event, move the whole group to
5741 * the hardware context.
5748 * Get the target context (task or percpu):
5750 ctx = find_get_context(pmu, task, cpu);
5757 * Look up the group leader (we will attach this event to it):
5763 * Do not allow a recursive hierarchy (this new sibling
5764 * becoming part of another group-sibling):
5766 if (group_leader->group_leader != group_leader)
5769 * Do not allow attaching to a group in a different
5770 * task or CPU context:
5773 if (group_leader->ctx->type != ctx->type)
5776 if (group_leader->ctx != ctx)
5781 * Only a group leader can be exclusive or pinned
5783 if (attr.exclusive || attr.pinned)
5788 err = perf_event_set_output(event, output_event);
5793 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
5794 if (IS_ERR(event_file)) {
5795 err = PTR_ERR(event_file);
5800 struct perf_event_context *gctx = group_leader->ctx;
5802 mutex_lock(&gctx->mutex);
5803 perf_event_remove_from_context(group_leader);
5804 list_for_each_entry(sibling, &group_leader->sibling_list,
5806 perf_event_remove_from_context(sibling);
5809 mutex_unlock(&gctx->mutex);
5813 event->filp = event_file;
5814 WARN_ON_ONCE(ctx->parent_ctx);
5815 mutex_lock(&ctx->mutex);
5818 perf_install_in_context(ctx, group_leader, cpu);
5820 list_for_each_entry(sibling, &group_leader->sibling_list,
5822 perf_install_in_context(ctx, sibling, cpu);
5827 perf_install_in_context(ctx, event, cpu);
5829 mutex_unlock(&ctx->mutex);
5831 event->owner = current;
5833 mutex_lock(&current->perf_event_mutex);
5834 list_add_tail(&event->owner_entry, &current->perf_event_list);
5835 mutex_unlock(&current->perf_event_mutex);
5838 * Precalculate sample_data sizes
5840 perf_event__header_size(event);
5841 perf_event__id_header_size(event);
5844 * Drop the reference on the group_event after placing the
5845 * new event on the sibling_list. This ensures destruction
5846 * of the group leader will find the pointer to itself in
5847 * perf_group_detach().
5849 fput_light(group_file, fput_needed);
5850 fd_install(event_fd, event_file);
5859 put_task_struct(task);
5861 fput_light(group_file, fput_needed);
5863 put_unused_fd(event_fd);
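/*
 * There is no glibc wrapper for this syscall; userspace typically rolls
 * its own (illustrative sketch, obviously not kernel code):
 */
#if 0	/* userspace sketch */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/*
 * Count task-clock for the calling thread on any cpu:
 *
 *	u64 count;
 *	int fd = perf_event_open(&attr, 0, -1, -1, 0);
 *	read(fd, &count, sizeof(count));
 */
#endif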
5868 * perf_event_create_kernel_counter
5870 * @attr: attributes of the counter to create
5871 * @cpu: cpu to which the counter is bound
5872 * @task: task to profile (NULL for percpu)
5875 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
5876 struct task_struct *task,
5877 perf_overflow_handler_t overflow_handler)
5879 struct perf_event_context *ctx;
5880 struct perf_event *event;
5884 * Get the target context (task or percpu):
5887 event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
5888 if (IS_ERR(event)) {
5889 err = PTR_ERR(event);
5893 ctx = find_get_context(event->pmu, task, cpu);
5900 WARN_ON_ONCE(ctx->parent_ctx);
5901 mutex_lock(&ctx->mutex);
5902 perf_install_in_context(ctx, event, cpu);
5904 mutex_unlock(&ctx->mutex);
5911 return ERR_PTR(err);
5913 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
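/*
 * In-kernel usage sketch (hypothetical caller; hw_breakpoint and the
 * hardlockup watchdog follow this pattern):
 */
static struct perf_event * __maybe_unused
sketch_cycles_counter(int cpu, perf_overflow_handler_t handler)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(struct perf_event_attr),
		.pinned		= 1,
	};

	/* task == NULL makes this a per-cpu counter */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, handler);
}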
5915 static void sync_child_event(struct perf_event *child_event,
5916 struct task_struct *child)
5918 struct perf_event *parent_event = child_event->parent;
5921 if (child_event->attr.inherit_stat)
5922 perf_event_read_event(child_event, child);
5924 child_val = perf_event_count(child_event);
5927 * Add back the child's count to the parent's count:
5929 atomic64_add(child_val, &parent_event->child_count);
5930 atomic64_add(child_event->total_time_enabled,
5931 &parent_event->child_total_time_enabled);
5932 atomic64_add(child_event->total_time_running,
5933 &parent_event->child_total_time_running);
5936 * Remove this event from the parent's list
5938 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5939 mutex_lock(&parent_event->child_mutex);
5940 list_del_init(&child_event->child_list);
5941 mutex_unlock(&parent_event->child_mutex);
5944 * Release the parent event, if this was the last
5945 * reference to it.
5947 fput(parent_event->filp);
5951 __perf_event_exit_task(struct perf_event *child_event,
5952 struct perf_event_context *child_ctx,
5953 struct task_struct *child)
5955 struct perf_event *parent_event;
5957 perf_event_remove_from_context(child_event);
5959 parent_event = child_event->parent;
5961 * It can happen that parent exits first, and has events
5962 * that are still around due to the child reference. These
5963 * events need to be zapped - but otherwise linger.
5965 if (parent_event) {
5966 sync_child_event(child_event, child);
5967 free_event(child_event);
5971 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
5973 struct perf_event *child_event, *tmp;
5974 struct perf_event_context *child_ctx;
5975 unsigned long flags;
5977 if (likely(!child->perf_event_ctxp[ctxn])) {
5978 perf_event_task(child, NULL, 0);
5982 local_irq_save(flags);
5984 * We can't reschedule here because interrupts are disabled,
5985 * and either child is current or it is a task that can't be
5986 * scheduled, so we are now safe from rescheduling changing
5987 * our context.
5989 child_ctx = child->perf_event_ctxp[ctxn];
5990 task_ctx_sched_out(child_ctx, EVENT_ALL);
5993 * Take the context lock here so that if find_get_context is
5994 * reading child->perf_event_ctxp, we wait until it has
5995 * incremented the context's refcount before we do put_ctx below.
5997 raw_spin_lock(&child_ctx->lock);
5998 child->perf_event_ctxp[ctxn] = NULL;
6000 * If this context is a clone, unclone it so it can't get
6001 * swapped to another process while we're removing all
6002 * the events from it.
6004 unclone_ctx(child_ctx);
6005 update_context_time(child_ctx);
6006 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6009 * Report the task dead after unscheduling the events so that we
6010 * won't get any samples after PERF_RECORD_EXIT. We can however still
6011 * get a few PERF_RECORD_READ events.
6013 perf_event_task(child, child_ctx, 0);
6016 * We can recurse on the same lock type through:
6018 * __perf_event_exit_task()
6019 * sync_child_event()
6020 * fput(parent_event->filp)
6022 * mutex_lock(&ctx->mutex)
6024 * But since it's the parent context it won't be the same instance.
6026 mutex_lock(&child_ctx->mutex);
6029 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
6031 __perf_event_exit_task(child_event, child_ctx, child);
6033 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
6035 __perf_event_exit_task(child_event, child_ctx, child);
6038 * If the last event was a group event, it will have appended all
6039 * its siblings to the list, but we obtained 'tmp' before that which
6040 * will still point to the list head terminating the iteration.
6042 if (!list_empty(&child_ctx->pinned_groups) ||
6043 !list_empty(&child_ctx->flexible_groups))
6046 mutex_unlock(&child_ctx->mutex);
6052 * When a child task exits, feed back event values to parent events.
6054 void perf_event_exit_task(struct task_struct *child)
6056 struct perf_event *event, *tmp;
6059 mutex_lock(&child->perf_event_mutex);
6060 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
6062 list_del_init(&event->owner_entry);
6065 * Ensure the list deletion is visible before we clear
6066 * the owner, closes a race against perf_release() where
6067 * we need to serialize on the owner->perf_event_mutex.
6070 event->owner = NULL;
6072 mutex_unlock(&child->perf_event_mutex);
6074 for_each_task_context_nr(ctxn)
6075 perf_event_exit_task_context(child, ctxn);
6078 static void perf_free_event(struct perf_event *event,
6079 struct perf_event_context *ctx)
6081 struct perf_event *parent = event->parent;
6083 if (WARN_ON_ONCE(!parent))
6086 mutex_lock(&parent->child_mutex);
6087 list_del_init(&event->child_list);
6088 mutex_unlock(&parent->child_mutex);
6092 perf_group_detach(event);
6093 list_del_event(event, ctx);
6098 * Free an unexposed, unused context, as created by inheritance in
6099 * perf_event_init_task below; used by fork() in case of failure.
6101 void perf_event_free_task(struct task_struct *task)
6103 struct perf_event_context *ctx;
6104 struct perf_event *event, *tmp;
6107 for_each_task_context_nr(ctxn) {
6108 ctx = task->perf_event_ctxp[ctxn];
6112 mutex_lock(&ctx->mutex);
6114 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
6116 perf_free_event(event, ctx);
6118 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
6120 perf_free_event(event, ctx);
6122 if (!list_empty(&ctx->pinned_groups) ||
6123 !list_empty(&ctx->flexible_groups))
6126 mutex_unlock(&ctx->mutex);
6132 void perf_event_delayed_put(struct task_struct *task)
6136 for_each_task_context_nr(ctxn)
6137 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6141 * inherit an event from parent task to child task:
6143 static struct perf_event *
6144 inherit_event(struct perf_event *parent_event,
6145 struct task_struct *parent,
6146 struct perf_event_context *parent_ctx,
6147 struct task_struct *child,
6148 struct perf_event *group_leader,
6149 struct perf_event_context *child_ctx)
6151 struct perf_event *child_event;
6152 unsigned long flags;
6155 * Instead of creating recursive hierarchies of events,
6156 * we link inherited events back to the original parent,
6157 * which has a filp for sure, which we use as the reference
6158 * count:
6160 if (parent_event->parent)
6161 parent_event = parent_event->parent;
6163 child_event = perf_event_alloc(&parent_event->attr,
6166 group_leader, parent_event,
6168 if (IS_ERR(child_event))
6173 * Make the child state follow the state of the parent event,
6174 * not its attr.disabled bit. We hold the parent's mutex,
6175 * so we won't race with perf_event_{en, dis}able_family.
6177 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6178 child_event->state = PERF_EVENT_STATE_INACTIVE;
6180 child_event->state = PERF_EVENT_STATE_OFF;
6182 if (parent_event->attr.freq) {
6183 u64 sample_period = parent_event->hw.sample_period;
6184 struct hw_perf_event *hwc = &child_event->hw;
6186 hwc->sample_period = sample_period;
6187 hwc->last_period = sample_period;
6189 local64_set(&hwc->period_left, sample_period);
6192 child_event->ctx = child_ctx;
6193 child_event->overflow_handler = parent_event->overflow_handler;
6196 * Precalculate sample_data sizes
6198 perf_event__header_size(child_event);
6199 perf_event__id_header_size(child_event);
6202 * Link it up in the child's context:
6204 raw_spin_lock_irqsave(&child_ctx->lock, flags);
6205 add_event_to_ctx(child_event, child_ctx);
6206 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6209 * Get a reference to the parent filp - we will fput it
6210 * when the child event exits. This is safe to do because
6211 * we are in the parent and we know that the filp still
6212 * exists and has a nonzero count:
6214 atomic_long_inc(&parent_event->filp->f_count);
6217 * Link this into the parent event's child list
6219 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6220 mutex_lock(&parent_event->child_mutex);
6221 list_add_tail(&child_event->child_list, &parent_event->child_list);
6222 mutex_unlock(&parent_event->child_mutex);
6227 static int inherit_group(struct perf_event *parent_event,
6228 struct task_struct *parent,
6229 struct perf_event_context *parent_ctx,
6230 struct task_struct *child,
6231 struct perf_event_context *child_ctx)
6233 struct perf_event *leader;
6234 struct perf_event *sub;
6235 struct perf_event *child_ctr;
6237 leader = inherit_event(parent_event, parent, parent_ctx,
6238 child, NULL, child_ctx);
6240 return PTR_ERR(leader);
6241 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6242 child_ctr = inherit_event(sub, parent, parent_ctx,
6243 child, leader, child_ctx);
6244 if (IS_ERR(child_ctr))
6245 return PTR_ERR(child_ctr);
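/*
 * Userspace view of the inheritance above (sketch): an event opened with
 * attr.inherit = 1 on a task is duplicated into each child at fork, and
 * the children's counts are folded back into the parent event at exit
 * (see sync_child_event() above):
 *
 *	attr.inherit = 1;
 *	fd = perf_event_open(&attr, pid, -1, -1, 0);
 */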
6251 inherit_task_group(struct perf_event *event, struct task_struct *parent,
6252 struct perf_event_context *parent_ctx,
6253 struct task_struct *child, int ctxn,
6257 struct perf_event_context *child_ctx;
6259 if (!event->attr.inherit) {
6264 child_ctx = child->perf_event_ctxp[ctxn];
6267 * This is executed from the parent task context, so
6268 * inherit events that have been marked for cloning.
6269 * First allocate and initialize a context for the
6270 * child.
6273 child_ctx = alloc_perf_context(event->pmu, child);
6277 child->perf_event_ctxp[ctxn] = child_ctx;
6280 ret = inherit_group(event, parent, parent_ctx,
6290 * Initialize the perf_event context in task_struct
6292 int perf_event_init_context(struct task_struct *child, int ctxn)
6294 struct perf_event_context *child_ctx, *parent_ctx;
6295 struct perf_event_context *cloned_ctx;
6296 struct perf_event *event;
6297 struct task_struct *parent = current;
6298 int inherited_all = 1;
6299 unsigned long flags;
6302 child->perf_event_ctxp[ctxn] = NULL;
6304 mutex_init(&child->perf_event_mutex);
6305 INIT_LIST_HEAD(&child->perf_event_list);
6307 if (likely(!parent->perf_event_ctxp[ctxn]))
6311 * If the parent's context is a clone, pin it so it won't get
6312 * swapped under us.
6314 parent_ctx = perf_pin_task_context(parent, ctxn);
6317 * No need to check if parent_ctx != NULL here; since we saw
6318 * it non-NULL earlier, the only reason for it to become NULL
6319 * is if we exit, and since we're currently in the middle of
6320 * a fork we can't be exiting at the same time.
6324 * Lock the parent list. No need to lock the child - not PID
6325 * hashed yet and not running, so nobody can access it.
6327 mutex_lock(&parent_ctx->mutex);
6330 * We don't have to disable NMIs - we are only looking at
6331 * the list, not manipulating it:
6333 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
6334 ret = inherit_task_group(event, parent, parent_ctx,
6335 child, ctxn, &inherited_all);
6341 * We can't hold ctx->lock when iterating the ->flexible_groups list due
6342 * to allocations, but we need to prevent rotation because
6343 * rotate_ctx() will change the list from interrupt context.
6345 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6346 parent_ctx->rotate_disable = 1;
6347 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6349 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
6350 ret = inherit_task_group(event, parent, parent_ctx,
6351 child, ctxn, &inherited_all);
6356 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6357 parent_ctx->rotate_disable = 0;
6358 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6360 child_ctx = child->perf_event_ctxp[ctxn];
6362 if (child_ctx && inherited_all) {
6364 * Mark the child context as a clone of the parent
6365 * context, or of whatever the parent is a clone of.
6366 * Note that if the parent is a clone, it could get
6367 * uncloned at any point, but that doesn't matter
6368 * because the list of events and the generation
6369 * count can't have changed since we took the mutex.
6371 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
6373 child_ctx->parent_ctx = cloned_ctx;
6374 child_ctx->parent_gen = parent_ctx->parent_gen;
6376 child_ctx->parent_ctx = parent_ctx;
6377 child_ctx->parent_gen = parent_ctx->generation;
6379 get_ctx(child_ctx->parent_ctx);
6382 mutex_unlock(&parent_ctx->mutex);
6384 perf_unpin_context(parent_ctx);
6390 * Initialize the perf_event contexts in task_struct, one per context type.
6392 int perf_event_init_task(struct task_struct *child)
6396 for_each_task_context_nr(ctxn) {
6397 ret = perf_event_init_context(child, ctxn);
6405 static void __init perf_event_init_all_cpus(void)
6407 struct swevent_htable *swhash;
6410 for_each_possible_cpu(cpu) {
6411 swhash = &per_cpu(swevent_htable, cpu);
6412 mutex_init(&swhash->hlist_mutex);
6413 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
6417 static void __cpuinit perf_event_init_cpu(int cpu)
6419 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6421 mutex_lock(&swhash->hlist_mutex);
6422 if (swhash->hlist_refcount > 0) {
6423 struct swevent_hlist *hlist;
6425 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
6427 rcu_assign_pointer(swhash->swevent_hlist, hlist);
6429 mutex_unlock(&swhash->hlist_mutex);
6432 #ifdef CONFIG_HOTPLUG_CPU
6433 static void perf_pmu_rotate_stop(struct pmu *pmu)
6435 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6437 WARN_ON(!irqs_disabled());
6439 list_del_init(&cpuctx->rotation_list);
6442 static void __perf_event_exit_context(void *__info)
6444 struct perf_event_context *ctx = __info;
6445 struct perf_event *event, *tmp;
6447 perf_pmu_rotate_stop(ctx->pmu);
6449 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
6450 __perf_event_remove_from_context(event);
6451 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
6452 __perf_event_remove_from_context(event);
6455 static void perf_event_exit_cpu_context(int cpu)
6457 struct perf_event_context *ctx;
6461 idx = srcu_read_lock(&pmus_srcu);
6462 list_for_each_entry_rcu(pmu, &pmus, entry) {
6463 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
6465 mutex_lock(&ctx->mutex);
6466 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
6467 mutex_unlock(&ctx->mutex);
6469 srcu_read_unlock(&pmus_srcu, idx);
6472 static void perf_event_exit_cpu(int cpu)
6474 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6476 mutex_lock(&swhash->hlist_mutex);
6477 swevent_hlist_release(swhash);
6478 mutex_unlock(&swhash->hlist_mutex);
6480 perf_event_exit_cpu_context(cpu);
6483 static inline void perf_event_exit_cpu(int cpu) { }
6486 static int __cpuinit
6487 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
6489 unsigned int cpu = (long)hcpu;
6491 switch (action & ~CPU_TASKS_FROZEN) {
6493 case CPU_UP_PREPARE:
6494 case CPU_DOWN_FAILED:
6495 perf_event_init_cpu(cpu);
6496 break;
6498 case CPU_UP_CANCELED:
6499 case CPU_DOWN_PREPARE:
6500 perf_event_exit_cpu(cpu);
6501 break;
6510 void __init perf_event_init(void)
6514 perf_event_init_all_cpus();
6515 init_srcu_struct(&pmus_srcu);
6516 perf_pmu_register(&perf_swevent);
6517 perf_pmu_register(&perf_cpu_clock);
6518 perf_pmu_register(&perf_task_clock);
6520 perf_cpu_notifier(perf_cpu_notify);
6522 ret = init_hw_breakpoint();
6523 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);