]> git.karo-electronics.de Git - mv-sheeva.git/blob - kernel/perf_event.c
perf: Rework the PMU methods
[mv-sheeva.git] / kernel / perf_event.c
1 /*
2  * Performance events core code:
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  * For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
20 #include <linux/sysfs.h>
21 #include <linux/dcache.h>
22 #include <linux/percpu.h>
23 #include <linux/ptrace.h>
24 #include <linux/vmstat.h>
25 #include <linux/vmalloc.h>
26 #include <linux/hardirq.h>
27 #include <linux/rculist.h>
28 #include <linux/uaccess.h>
29 #include <linux/syscalls.h>
30 #include <linux/anon_inodes.h>
31 #include <linux/kernel_stat.h>
32 #include <linux/perf_event.h>
33 #include <linux/ftrace_event.h>
34
35 #include <asm/irq_regs.h>
36
37 /*
38  * Each CPU has a list of per CPU events:
39  */
40 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
41
42 int perf_max_events __read_mostly = 1;
43 static int perf_reserved_percpu __read_mostly;
44 static int perf_overcommit __read_mostly = 1;
45
46 static atomic_t nr_events __read_mostly;
47 static atomic_t nr_mmap_events __read_mostly;
48 static atomic_t nr_comm_events __read_mostly;
49 static atomic_t nr_task_events __read_mostly;
50
51 /*
52  * perf event paranoia level:
53  *  -1 - not paranoid at all
54  *   0 - disallow raw tracepoint access for unpriv
55  *   1 - disallow cpu events for unpriv
56  *   2 - disallow kernel profiling for unpriv
57  */
58 int sysctl_perf_event_paranoid __read_mostly = 1;
59
60 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
61
62 /*
63  * max perf event sample rate
64  */
65 int sysctl_perf_event_sample_rate __read_mostly = 100000;
66
67 static atomic64_t perf_event_id;
68
69 /*
70  * Lock for (sysadmin-configurable) event reservations:
71  */
72 static DEFINE_SPINLOCK(perf_resource_lock);
73
74 void __weak perf_event_print_debug(void)        { }
75
76 void perf_pmu_disable(struct pmu *pmu)
77 {
78         int *count = this_cpu_ptr(pmu->pmu_disable_count);
79         if (!(*count)++)
80                 pmu->pmu_disable(pmu);
81 }
82
83 void perf_pmu_enable(struct pmu *pmu)
84 {
85         int *count = this_cpu_ptr(pmu->pmu_disable_count);
86         if (!--(*count))
87                 pmu->pmu_enable(pmu);
88 }
89
90 static void get_ctx(struct perf_event_context *ctx)
91 {
92         WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
93 }
94
95 static void free_ctx(struct rcu_head *head)
96 {
97         struct perf_event_context *ctx;
98
99         ctx = container_of(head, struct perf_event_context, rcu_head);
100         kfree(ctx);
101 }
102
103 static void put_ctx(struct perf_event_context *ctx)
104 {
105         if (atomic_dec_and_test(&ctx->refcount)) {
106                 if (ctx->parent_ctx)
107                         put_ctx(ctx->parent_ctx);
108                 if (ctx->task)
109                         put_task_struct(ctx->task);
110                 call_rcu(&ctx->rcu_head, free_ctx);
111         }
112 }
113
114 static void unclone_ctx(struct perf_event_context *ctx)
115 {
116         if (ctx->parent_ctx) {
117                 put_ctx(ctx->parent_ctx);
118                 ctx->parent_ctx = NULL;
119         }
120 }
121
122 /*
123  * If we inherit events we want to return the parent event id
124  * to userspace.
125  */
126 static u64 primary_event_id(struct perf_event *event)
127 {
128         u64 id = event->id;
129
130         if (event->parent)
131                 id = event->parent->id;
132
133         return id;
134 }
135
136 /*
137  * Get the perf_event_context for a task and lock it.
138  * This has to cope with with the fact that until it is locked,
139  * the context could get moved to another task.
140  */
141 static struct perf_event_context *
142 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
143 {
144         struct perf_event_context *ctx;
145
146         rcu_read_lock();
147 retry:
148         ctx = rcu_dereference(task->perf_event_ctxp);
149         if (ctx) {
150                 /*
151                  * If this context is a clone of another, it might
152                  * get swapped for another underneath us by
153                  * perf_event_task_sched_out, though the
154                  * rcu_read_lock() protects us from any context
155                  * getting freed.  Lock the context and check if it
156                  * got swapped before we could get the lock, and retry
157                  * if so.  If we locked the right context, then it
158                  * can't get swapped on us any more.
159                  */
160                 raw_spin_lock_irqsave(&ctx->lock, *flags);
161                 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
162                         raw_spin_unlock_irqrestore(&ctx->lock, *flags);
163                         goto retry;
164                 }
165
166                 if (!atomic_inc_not_zero(&ctx->refcount)) {
167                         raw_spin_unlock_irqrestore(&ctx->lock, *flags);
168                         ctx = NULL;
169                 }
170         }
171         rcu_read_unlock();
172         return ctx;
173 }
174
175 /*
176  * Get the context for a task and increment its pin_count so it
177  * can't get swapped to another task.  This also increments its
178  * reference count so that the context can't get freed.
179  */
180 static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
181 {
182         struct perf_event_context *ctx;
183         unsigned long flags;
184
185         ctx = perf_lock_task_context(task, &flags);
186         if (ctx) {
187                 ++ctx->pin_count;
188                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
189         }
190         return ctx;
191 }
192
193 static void perf_unpin_context(struct perf_event_context *ctx)
194 {
195         unsigned long flags;
196
197         raw_spin_lock_irqsave(&ctx->lock, flags);
198         --ctx->pin_count;
199         raw_spin_unlock_irqrestore(&ctx->lock, flags);
200         put_ctx(ctx);
201 }
202
203 static inline u64 perf_clock(void)
204 {
205         return local_clock();
206 }
207
208 /*
209  * Update the record of the current time in a context.
210  */
211 static void update_context_time(struct perf_event_context *ctx)
212 {
213         u64 now = perf_clock();
214
215         ctx->time += now - ctx->timestamp;
216         ctx->timestamp = now;
217 }
218
219 /*
220  * Update the total_time_enabled and total_time_running fields for a event.
221  */
222 static void update_event_times(struct perf_event *event)
223 {
224         struct perf_event_context *ctx = event->ctx;
225         u64 run_end;
226
227         if (event->state < PERF_EVENT_STATE_INACTIVE ||
228             event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
229                 return;
230
231         if (ctx->is_active)
232                 run_end = ctx->time;
233         else
234                 run_end = event->tstamp_stopped;
235
236         event->total_time_enabled = run_end - event->tstamp_enabled;
237
238         if (event->state == PERF_EVENT_STATE_INACTIVE)
239                 run_end = event->tstamp_stopped;
240         else
241                 run_end = ctx->time;
242
243         event->total_time_running = run_end - event->tstamp_running;
244 }
245
246 /*
247  * Update total_time_enabled and total_time_running for all events in a group.
248  */
249 static void update_group_times(struct perf_event *leader)
250 {
251         struct perf_event *event;
252
253         update_event_times(leader);
254         list_for_each_entry(event, &leader->sibling_list, group_entry)
255                 update_event_times(event);
256 }
257
258 static struct list_head *
259 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
260 {
261         if (event->attr.pinned)
262                 return &ctx->pinned_groups;
263         else
264                 return &ctx->flexible_groups;
265 }
266
267 /*
268  * Add a event from the lists for its context.
269  * Must be called with ctx->mutex and ctx->lock held.
270  */
271 static void
272 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
273 {
274         WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
275         event->attach_state |= PERF_ATTACH_CONTEXT;
276
277         /*
278          * If we're a stand alone event or group leader, we go to the context
279          * list, group events are kept attached to the group so that
280          * perf_group_detach can, at all times, locate all siblings.
281          */
282         if (event->group_leader == event) {
283                 struct list_head *list;
284
285                 if (is_software_event(event))
286                         event->group_flags |= PERF_GROUP_SOFTWARE;
287
288                 list = ctx_group_list(event, ctx);
289                 list_add_tail(&event->group_entry, list);
290         }
291
292         list_add_rcu(&event->event_entry, &ctx->event_list);
293         ctx->nr_events++;
294         if (event->attr.inherit_stat)
295                 ctx->nr_stat++;
296 }
297
298 static void perf_group_attach(struct perf_event *event)
299 {
300         struct perf_event *group_leader = event->group_leader;
301
302         WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
303         event->attach_state |= PERF_ATTACH_GROUP;
304
305         if (group_leader == event)
306                 return;
307
308         if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
309                         !is_software_event(event))
310                 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
311
312         list_add_tail(&event->group_entry, &group_leader->sibling_list);
313         group_leader->nr_siblings++;
314 }
315
316 /*
317  * Remove a event from the lists for its context.
318  * Must be called with ctx->mutex and ctx->lock held.
319  */
320 static void
321 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
322 {
323         /*
324          * We can have double detach due to exit/hot-unplug + close.
325          */
326         if (!(event->attach_state & PERF_ATTACH_CONTEXT))
327                 return;
328
329         event->attach_state &= ~PERF_ATTACH_CONTEXT;
330
331         ctx->nr_events--;
332         if (event->attr.inherit_stat)
333                 ctx->nr_stat--;
334
335         list_del_rcu(&event->event_entry);
336
337         if (event->group_leader == event)
338                 list_del_init(&event->group_entry);
339
340         update_group_times(event);
341
342         /*
343          * If event was in error state, then keep it
344          * that way, otherwise bogus counts will be
345          * returned on read(). The only way to get out
346          * of error state is by explicit re-enabling
347          * of the event
348          */
349         if (event->state > PERF_EVENT_STATE_OFF)
350                 event->state = PERF_EVENT_STATE_OFF;
351 }
352
353 static void perf_group_detach(struct perf_event *event)
354 {
355         struct perf_event *sibling, *tmp;
356         struct list_head *list = NULL;
357
358         /*
359          * We can have double detach due to exit/hot-unplug + close.
360          */
361         if (!(event->attach_state & PERF_ATTACH_GROUP))
362                 return;
363
364         event->attach_state &= ~PERF_ATTACH_GROUP;
365
366         /*
367          * If this is a sibling, remove it from its group.
368          */
369         if (event->group_leader != event) {
370                 list_del_init(&event->group_entry);
371                 event->group_leader->nr_siblings--;
372                 return;
373         }
374
375         if (!list_empty(&event->group_entry))
376                 list = &event->group_entry;
377
378         /*
379          * If this was a group event with sibling events then
380          * upgrade the siblings to singleton events by adding them
381          * to whatever list we are on.
382          */
383         list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
384                 if (list)
385                         list_move_tail(&sibling->group_entry, list);
386                 sibling->group_leader = sibling;
387
388                 /* Inherit group flags from the previous leader */
389                 sibling->group_flags = event->group_flags;
390         }
391 }
392
393 static inline int
394 event_filter_match(struct perf_event *event)
395 {
396         return event->cpu == -1 || event->cpu == smp_processor_id();
397 }
398
399 static void
400 event_sched_out(struct perf_event *event,
401                   struct perf_cpu_context *cpuctx,
402                   struct perf_event_context *ctx)
403 {
404         u64 delta;
405         /*
406          * An event which could not be activated because of
407          * filter mismatch still needs to have its timings
408          * maintained, otherwise bogus information is return
409          * via read() for time_enabled, time_running:
410          */
411         if (event->state == PERF_EVENT_STATE_INACTIVE
412             && !event_filter_match(event)) {
413                 delta = ctx->time - event->tstamp_stopped;
414                 event->tstamp_running += delta;
415                 event->tstamp_stopped = ctx->time;
416         }
417
418         if (event->state != PERF_EVENT_STATE_ACTIVE)
419                 return;
420
421         event->state = PERF_EVENT_STATE_INACTIVE;
422         if (event->pending_disable) {
423                 event->pending_disable = 0;
424                 event->state = PERF_EVENT_STATE_OFF;
425         }
426         event->tstamp_stopped = ctx->time;
427         event->pmu->del(event, 0);
428         event->oncpu = -1;
429
430         if (!is_software_event(event))
431                 cpuctx->active_oncpu--;
432         ctx->nr_active--;
433         if (event->attr.exclusive || !cpuctx->active_oncpu)
434                 cpuctx->exclusive = 0;
435 }
436
437 static void
438 group_sched_out(struct perf_event *group_event,
439                 struct perf_cpu_context *cpuctx,
440                 struct perf_event_context *ctx)
441 {
442         struct perf_event *event;
443         int state = group_event->state;
444
445         event_sched_out(group_event, cpuctx, ctx);
446
447         /*
448          * Schedule out siblings (if any):
449          */
450         list_for_each_entry(event, &group_event->sibling_list, group_entry)
451                 event_sched_out(event, cpuctx, ctx);
452
453         if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
454                 cpuctx->exclusive = 0;
455 }
456
457 /*
458  * Cross CPU call to remove a performance event
459  *
460  * We disable the event on the hardware level first. After that we
461  * remove it from the context list.
462  */
463 static void __perf_event_remove_from_context(void *info)
464 {
465         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
466         struct perf_event *event = info;
467         struct perf_event_context *ctx = event->ctx;
468
469         /*
470          * If this is a task context, we need to check whether it is
471          * the current task context of this cpu. If not it has been
472          * scheduled out before the smp call arrived.
473          */
474         if (ctx->task && cpuctx->task_ctx != ctx)
475                 return;
476
477         raw_spin_lock(&ctx->lock);
478
479         event_sched_out(event, cpuctx, ctx);
480
481         list_del_event(event, ctx);
482
483         if (!ctx->task) {
484                 /*
485                  * Allow more per task events with respect to the
486                  * reservation:
487                  */
488                 cpuctx->max_pertask =
489                         min(perf_max_events - ctx->nr_events,
490                             perf_max_events - perf_reserved_percpu);
491         }
492
493         raw_spin_unlock(&ctx->lock);
494 }
495
496
497 /*
498  * Remove the event from a task's (or a CPU's) list of events.
499  *
500  * Must be called with ctx->mutex held.
501  *
502  * CPU events are removed with a smp call. For task events we only
503  * call when the task is on a CPU.
504  *
505  * If event->ctx is a cloned context, callers must make sure that
506  * every task struct that event->ctx->task could possibly point to
507  * remains valid.  This is OK when called from perf_release since
508  * that only calls us on the top-level context, which can't be a clone.
509  * When called from perf_event_exit_task, it's OK because the
510  * context has been detached from its task.
511  */
512 static void perf_event_remove_from_context(struct perf_event *event)
513 {
514         struct perf_event_context *ctx = event->ctx;
515         struct task_struct *task = ctx->task;
516
517         if (!task) {
518                 /*
519                  * Per cpu events are removed via an smp call and
520                  * the removal is always successful.
521                  */
522                 smp_call_function_single(event->cpu,
523                                          __perf_event_remove_from_context,
524                                          event, 1);
525                 return;
526         }
527
528 retry:
529         task_oncpu_function_call(task, __perf_event_remove_from_context,
530                                  event);
531
532         raw_spin_lock_irq(&ctx->lock);
533         /*
534          * If the context is active we need to retry the smp call.
535          */
536         if (ctx->nr_active && !list_empty(&event->group_entry)) {
537                 raw_spin_unlock_irq(&ctx->lock);
538                 goto retry;
539         }
540
541         /*
542          * The lock prevents that this context is scheduled in so we
543          * can remove the event safely, if the call above did not
544          * succeed.
545          */
546         if (!list_empty(&event->group_entry))
547                 list_del_event(event, ctx);
548         raw_spin_unlock_irq(&ctx->lock);
549 }
550
551 /*
552  * Cross CPU call to disable a performance event
553  */
554 static void __perf_event_disable(void *info)
555 {
556         struct perf_event *event = info;
557         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
558         struct perf_event_context *ctx = event->ctx;
559
560         /*
561          * If this is a per-task event, need to check whether this
562          * event's task is the current task on this cpu.
563          */
564         if (ctx->task && cpuctx->task_ctx != ctx)
565                 return;
566
567         raw_spin_lock(&ctx->lock);
568
569         /*
570          * If the event is on, turn it off.
571          * If it is in error state, leave it in error state.
572          */
573         if (event->state >= PERF_EVENT_STATE_INACTIVE) {
574                 update_context_time(ctx);
575                 update_group_times(event);
576                 if (event == event->group_leader)
577                         group_sched_out(event, cpuctx, ctx);
578                 else
579                         event_sched_out(event, cpuctx, ctx);
580                 event->state = PERF_EVENT_STATE_OFF;
581         }
582
583         raw_spin_unlock(&ctx->lock);
584 }
585
586 /*
587  * Disable a event.
588  *
589  * If event->ctx is a cloned context, callers must make sure that
590  * every task struct that event->ctx->task could possibly point to
591  * remains valid.  This condition is satisifed when called through
592  * perf_event_for_each_child or perf_event_for_each because they
593  * hold the top-level event's child_mutex, so any descendant that
594  * goes to exit will block in sync_child_event.
595  * When called from perf_pending_event it's OK because event->ctx
596  * is the current context on this CPU and preemption is disabled,
597  * hence we can't get into perf_event_task_sched_out for this context.
598  */
599 void perf_event_disable(struct perf_event *event)
600 {
601         struct perf_event_context *ctx = event->ctx;
602         struct task_struct *task = ctx->task;
603
604         if (!task) {
605                 /*
606                  * Disable the event on the cpu that it's on
607                  */
608                 smp_call_function_single(event->cpu, __perf_event_disable,
609                                          event, 1);
610                 return;
611         }
612
613 retry:
614         task_oncpu_function_call(task, __perf_event_disable, event);
615
616         raw_spin_lock_irq(&ctx->lock);
617         /*
618          * If the event is still active, we need to retry the cross-call.
619          */
620         if (event->state == PERF_EVENT_STATE_ACTIVE) {
621                 raw_spin_unlock_irq(&ctx->lock);
622                 goto retry;
623         }
624
625         /*
626          * Since we have the lock this context can't be scheduled
627          * in, so we can change the state safely.
628          */
629         if (event->state == PERF_EVENT_STATE_INACTIVE) {
630                 update_group_times(event);
631                 event->state = PERF_EVENT_STATE_OFF;
632         }
633
634         raw_spin_unlock_irq(&ctx->lock);
635 }
636
637 static int
638 event_sched_in(struct perf_event *event,
639                  struct perf_cpu_context *cpuctx,
640                  struct perf_event_context *ctx)
641 {
642         if (event->state <= PERF_EVENT_STATE_OFF)
643                 return 0;
644
645         event->state = PERF_EVENT_STATE_ACTIVE;
646         event->oncpu = smp_processor_id();
647         /*
648          * The new state must be visible before we turn it on in the hardware:
649          */
650         smp_wmb();
651
652         if (event->pmu->add(event, PERF_EF_START)) {
653                 event->state = PERF_EVENT_STATE_INACTIVE;
654                 event->oncpu = -1;
655                 return -EAGAIN;
656         }
657
658         event->tstamp_running += ctx->time - event->tstamp_stopped;
659
660         if (!is_software_event(event))
661                 cpuctx->active_oncpu++;
662         ctx->nr_active++;
663
664         if (event->attr.exclusive)
665                 cpuctx->exclusive = 1;
666
667         return 0;
668 }
669
670 static int
671 group_sched_in(struct perf_event *group_event,
672                struct perf_cpu_context *cpuctx,
673                struct perf_event_context *ctx)
674 {
675         struct perf_event *event, *partial_group = NULL;
676         struct pmu *pmu = group_event->pmu;
677
678         if (group_event->state == PERF_EVENT_STATE_OFF)
679                 return 0;
680
681         pmu->start_txn(pmu);
682
683         if (event_sched_in(group_event, cpuctx, ctx)) {
684                 pmu->cancel_txn(pmu);
685                 return -EAGAIN;
686         }
687
688         /*
689          * Schedule in siblings as one group (if any):
690          */
691         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
692                 if (event_sched_in(event, cpuctx, ctx)) {
693                         partial_group = event;
694                         goto group_error;
695                 }
696         }
697
698         if (!pmu->commit_txn(pmu))
699                 return 0;
700
701 group_error:
702         /*
703          * Groups can be scheduled in as one unit only, so undo any
704          * partial group before returning:
705          */
706         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
707                 if (event == partial_group)
708                         break;
709                 event_sched_out(event, cpuctx, ctx);
710         }
711         event_sched_out(group_event, cpuctx, ctx);
712
713         pmu->cancel_txn(pmu);
714
715         return -EAGAIN;
716 }
717
718 /*
719  * Work out whether we can put this event group on the CPU now.
720  */
721 static int group_can_go_on(struct perf_event *event,
722                            struct perf_cpu_context *cpuctx,
723                            int can_add_hw)
724 {
725         /*
726          * Groups consisting entirely of software events can always go on.
727          */
728         if (event->group_flags & PERF_GROUP_SOFTWARE)
729                 return 1;
730         /*
731          * If an exclusive group is already on, no other hardware
732          * events can go on.
733          */
734         if (cpuctx->exclusive)
735                 return 0;
736         /*
737          * If this group is exclusive and there are already
738          * events on the CPU, it can't go on.
739          */
740         if (event->attr.exclusive && cpuctx->active_oncpu)
741                 return 0;
742         /*
743          * Otherwise, try to add it if all previous groups were able
744          * to go on.
745          */
746         return can_add_hw;
747 }
748
749 static void add_event_to_ctx(struct perf_event *event,
750                                struct perf_event_context *ctx)
751 {
752         list_add_event(event, ctx);
753         perf_group_attach(event);
754         event->tstamp_enabled = ctx->time;
755         event->tstamp_running = ctx->time;
756         event->tstamp_stopped = ctx->time;
757 }
758
759 /*
760  * Cross CPU call to install and enable a performance event
761  *
762  * Must be called with ctx->mutex held
763  */
764 static void __perf_install_in_context(void *info)
765 {
766         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
767         struct perf_event *event = info;
768         struct perf_event_context *ctx = event->ctx;
769         struct perf_event *leader = event->group_leader;
770         int err;
771
772         /*
773          * If this is a task context, we need to check whether it is
774          * the current task context of this cpu. If not it has been
775          * scheduled out before the smp call arrived.
776          * Or possibly this is the right context but it isn't
777          * on this cpu because it had no events.
778          */
779         if (ctx->task && cpuctx->task_ctx != ctx) {
780                 if (cpuctx->task_ctx || ctx->task != current)
781                         return;
782                 cpuctx->task_ctx = ctx;
783         }
784
785         raw_spin_lock(&ctx->lock);
786         ctx->is_active = 1;
787         update_context_time(ctx);
788
789         add_event_to_ctx(event, ctx);
790
791         if (event->cpu != -1 && event->cpu != smp_processor_id())
792                 goto unlock;
793
794         /*
795          * Don't put the event on if it is disabled or if
796          * it is in a group and the group isn't on.
797          */
798         if (event->state != PERF_EVENT_STATE_INACTIVE ||
799             (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
800                 goto unlock;
801
802         /*
803          * An exclusive event can't go on if there are already active
804          * hardware events, and no hardware event can go on if there
805          * is already an exclusive event on.
806          */
807         if (!group_can_go_on(event, cpuctx, 1))
808                 err = -EEXIST;
809         else
810                 err = event_sched_in(event, cpuctx, ctx);
811
812         if (err) {
813                 /*
814                  * This event couldn't go on.  If it is in a group
815                  * then we have to pull the whole group off.
816                  * If the event group is pinned then put it in error state.
817                  */
818                 if (leader != event)
819                         group_sched_out(leader, cpuctx, ctx);
820                 if (leader->attr.pinned) {
821                         update_group_times(leader);
822                         leader->state = PERF_EVENT_STATE_ERROR;
823                 }
824         }
825
826         if (!err && !ctx->task && cpuctx->max_pertask)
827                 cpuctx->max_pertask--;
828
829 unlock:
830         raw_spin_unlock(&ctx->lock);
831 }
832
833 /*
834  * Attach a performance event to a context
835  *
836  * First we add the event to the list with the hardware enable bit
837  * in event->hw_config cleared.
838  *
839  * If the event is attached to a task which is on a CPU we use a smp
840  * call to enable it in the task context. The task might have been
841  * scheduled away, but we check this in the smp call again.
842  *
843  * Must be called with ctx->mutex held.
844  */
845 static void
846 perf_install_in_context(struct perf_event_context *ctx,
847                         struct perf_event *event,
848                         int cpu)
849 {
850         struct task_struct *task = ctx->task;
851
852         if (!task) {
853                 /*
854                  * Per cpu events are installed via an smp call and
855                  * the install is always successful.
856                  */
857                 smp_call_function_single(cpu, __perf_install_in_context,
858                                          event, 1);
859                 return;
860         }
861
862 retry:
863         task_oncpu_function_call(task, __perf_install_in_context,
864                                  event);
865
866         raw_spin_lock_irq(&ctx->lock);
867         /*
868          * we need to retry the smp call.
869          */
870         if (ctx->is_active && list_empty(&event->group_entry)) {
871                 raw_spin_unlock_irq(&ctx->lock);
872                 goto retry;
873         }
874
875         /*
876          * The lock prevents that this context is scheduled in so we
877          * can add the event safely, if it the call above did not
878          * succeed.
879          */
880         if (list_empty(&event->group_entry))
881                 add_event_to_ctx(event, ctx);
882         raw_spin_unlock_irq(&ctx->lock);
883 }
884
885 /*
886  * Put a event into inactive state and update time fields.
887  * Enabling the leader of a group effectively enables all
888  * the group members that aren't explicitly disabled, so we
889  * have to update their ->tstamp_enabled also.
890  * Note: this works for group members as well as group leaders
891  * since the non-leader members' sibling_lists will be empty.
892  */
893 static void __perf_event_mark_enabled(struct perf_event *event,
894                                         struct perf_event_context *ctx)
895 {
896         struct perf_event *sub;
897
898         event->state = PERF_EVENT_STATE_INACTIVE;
899         event->tstamp_enabled = ctx->time - event->total_time_enabled;
900         list_for_each_entry(sub, &event->sibling_list, group_entry) {
901                 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
902                         sub->tstamp_enabled =
903                                 ctx->time - sub->total_time_enabled;
904                 }
905         }
906 }
907
908 /*
909  * Cross CPU call to enable a performance event
910  */
911 static void __perf_event_enable(void *info)
912 {
913         struct perf_event *event = info;
914         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
915         struct perf_event_context *ctx = event->ctx;
916         struct perf_event *leader = event->group_leader;
917         int err;
918
919         /*
920          * If this is a per-task event, need to check whether this
921          * event's task is the current task on this cpu.
922          */
923         if (ctx->task && cpuctx->task_ctx != ctx) {
924                 if (cpuctx->task_ctx || ctx->task != current)
925                         return;
926                 cpuctx->task_ctx = ctx;
927         }
928
929         raw_spin_lock(&ctx->lock);
930         ctx->is_active = 1;
931         update_context_time(ctx);
932
933         if (event->state >= PERF_EVENT_STATE_INACTIVE)
934                 goto unlock;
935         __perf_event_mark_enabled(event, ctx);
936
937         if (event->cpu != -1 && event->cpu != smp_processor_id())
938                 goto unlock;
939
940         /*
941          * If the event is in a group and isn't the group leader,
942          * then don't put it on unless the group is on.
943          */
944         if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
945                 goto unlock;
946
947         if (!group_can_go_on(event, cpuctx, 1)) {
948                 err = -EEXIST;
949         } else {
950                 if (event == leader)
951                         err = group_sched_in(event, cpuctx, ctx);
952                 else
953                         err = event_sched_in(event, cpuctx, ctx);
954         }
955
956         if (err) {
957                 /*
958                  * If this event can't go on and it's part of a
959                  * group, then the whole group has to come off.
960                  */
961                 if (leader != event)
962                         group_sched_out(leader, cpuctx, ctx);
963                 if (leader->attr.pinned) {
964                         update_group_times(leader);
965                         leader->state = PERF_EVENT_STATE_ERROR;
966                 }
967         }
968
969 unlock:
970         raw_spin_unlock(&ctx->lock);
971 }
972
973 /*
974  * Enable a event.
975  *
976  * If event->ctx is a cloned context, callers must make sure that
977  * every task struct that event->ctx->task could possibly point to
978  * remains valid.  This condition is satisfied when called through
979  * perf_event_for_each_child or perf_event_for_each as described
980  * for perf_event_disable.
981  */
982 void perf_event_enable(struct perf_event *event)
983 {
984         struct perf_event_context *ctx = event->ctx;
985         struct task_struct *task = ctx->task;
986
987         if (!task) {
988                 /*
989                  * Enable the event on the cpu that it's on
990                  */
991                 smp_call_function_single(event->cpu, __perf_event_enable,
992                                          event, 1);
993                 return;
994         }
995
996         raw_spin_lock_irq(&ctx->lock);
997         if (event->state >= PERF_EVENT_STATE_INACTIVE)
998                 goto out;
999
1000         /*
1001          * If the event is in error state, clear that first.
1002          * That way, if we see the event in error state below, we
1003          * know that it has gone back into error state, as distinct
1004          * from the task having been scheduled away before the
1005          * cross-call arrived.
1006          */
1007         if (event->state == PERF_EVENT_STATE_ERROR)
1008                 event->state = PERF_EVENT_STATE_OFF;
1009
1010 retry:
1011         raw_spin_unlock_irq(&ctx->lock);
1012         task_oncpu_function_call(task, __perf_event_enable, event);
1013
1014         raw_spin_lock_irq(&ctx->lock);
1015
1016         /*
1017          * If the context is active and the event is still off,
1018          * we need to retry the cross-call.
1019          */
1020         if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
1021                 goto retry;
1022
1023         /*
1024          * Since we have the lock this context can't be scheduled
1025          * in, so we can change the state safely.
1026          */
1027         if (event->state == PERF_EVENT_STATE_OFF)
1028                 __perf_event_mark_enabled(event, ctx);
1029
1030 out:
1031         raw_spin_unlock_irq(&ctx->lock);
1032 }
1033
1034 static int perf_event_refresh(struct perf_event *event, int refresh)
1035 {
1036         /*
1037          * not supported on inherited events
1038          */
1039         if (event->attr.inherit)
1040                 return -EINVAL;
1041
1042         atomic_add(refresh, &event->event_limit);
1043         perf_event_enable(event);
1044
1045         return 0;
1046 }
1047
1048 enum event_type_t {
1049         EVENT_FLEXIBLE = 0x1,
1050         EVENT_PINNED = 0x2,
1051         EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
1052 };
1053
1054 static void ctx_sched_out(struct perf_event_context *ctx,
1055                           struct perf_cpu_context *cpuctx,
1056                           enum event_type_t event_type)
1057 {
1058         struct perf_event *event;
1059
1060         raw_spin_lock(&ctx->lock);
1061         ctx->is_active = 0;
1062         if (likely(!ctx->nr_events))
1063                 goto out;
1064         update_context_time(ctx);
1065
1066         if (!ctx->nr_active)
1067                 goto out;
1068
1069         if (event_type & EVENT_PINNED) {
1070                 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1071                         group_sched_out(event, cpuctx, ctx);
1072         }
1073
1074         if (event_type & EVENT_FLEXIBLE) {
1075                 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1076                         group_sched_out(event, cpuctx, ctx);
1077         }
1078 out:
1079         raw_spin_unlock(&ctx->lock);
1080 }
1081
1082 /*
1083  * Test whether two contexts are equivalent, i.e. whether they
1084  * have both been cloned from the same version of the same context
1085  * and they both have the same number of enabled events.
1086  * If the number of enabled events is the same, then the set
1087  * of enabled events should be the same, because these are both
1088  * inherited contexts, therefore we can't access individual events
1089  * in them directly with an fd; we can only enable/disable all
1090  * events via prctl, or enable/disable all events in a family
1091  * via ioctl, which will have the same effect on both contexts.
1092  */
1093 static int context_equiv(struct perf_event_context *ctx1,
1094                          struct perf_event_context *ctx2)
1095 {
1096         return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1097                 && ctx1->parent_gen == ctx2->parent_gen
1098                 && !ctx1->pin_count && !ctx2->pin_count;
1099 }
1100
1101 static void __perf_event_sync_stat(struct perf_event *event,
1102                                      struct perf_event *next_event)
1103 {
1104         u64 value;
1105
1106         if (!event->attr.inherit_stat)
1107                 return;
1108
1109         /*
1110          * Update the event value, we cannot use perf_event_read()
1111          * because we're in the middle of a context switch and have IRQs
1112          * disabled, which upsets smp_call_function_single(), however
1113          * we know the event must be on the current CPU, therefore we
1114          * don't need to use it.
1115          */
1116         switch (event->state) {
1117         case PERF_EVENT_STATE_ACTIVE:
1118                 event->pmu->read(event);
1119                 /* fall-through */
1120
1121         case PERF_EVENT_STATE_INACTIVE:
1122                 update_event_times(event);
1123                 break;
1124
1125         default:
1126                 break;
1127         }
1128
1129         /*
1130          * In order to keep per-task stats reliable we need to flip the event
1131          * values when we flip the contexts.
1132          */
1133         value = local64_read(&next_event->count);
1134         value = local64_xchg(&event->count, value);
1135         local64_set(&next_event->count, value);
1136
1137         swap(event->total_time_enabled, next_event->total_time_enabled);
1138         swap(event->total_time_running, next_event->total_time_running);
1139
1140         /*
1141          * Since we swizzled the values, update the user visible data too.
1142          */
1143         perf_event_update_userpage(event);
1144         perf_event_update_userpage(next_event);
1145 }
1146
1147 #define list_next_entry(pos, member) \
1148         list_entry(pos->member.next, typeof(*pos), member)
1149
1150 static void perf_event_sync_stat(struct perf_event_context *ctx,
1151                                    struct perf_event_context *next_ctx)
1152 {
1153         struct perf_event *event, *next_event;
1154
1155         if (!ctx->nr_stat)
1156                 return;
1157
1158         update_context_time(ctx);
1159
1160         event = list_first_entry(&ctx->event_list,
1161                                    struct perf_event, event_entry);
1162
1163         next_event = list_first_entry(&next_ctx->event_list,
1164                                         struct perf_event, event_entry);
1165
1166         while (&event->event_entry != &ctx->event_list &&
1167                &next_event->event_entry != &next_ctx->event_list) {
1168
1169                 __perf_event_sync_stat(event, next_event);
1170
1171                 event = list_next_entry(event, event_entry);
1172                 next_event = list_next_entry(next_event, event_entry);
1173         }
1174 }
1175
1176 /*
1177  * Called from scheduler to remove the events of the current task,
1178  * with interrupts disabled.
1179  *
1180  * We stop each event and update the event value in event->count.
1181  *
1182  * This does not protect us against NMI, but disable()
1183  * sets the disabled bit in the control field of event _before_
1184  * accessing the event control register. If a NMI hits, then it will
1185  * not restart the event.
1186  */
1187 void perf_event_task_sched_out(struct task_struct *task,
1188                                  struct task_struct *next)
1189 {
1190         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1191         struct perf_event_context *ctx = task->perf_event_ctxp;
1192         struct perf_event_context *next_ctx;
1193         struct perf_event_context *parent;
1194         int do_switch = 1;
1195
1196         perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1197
1198         if (likely(!ctx || !cpuctx->task_ctx))
1199                 return;
1200
1201         rcu_read_lock();
1202         parent = rcu_dereference(ctx->parent_ctx);
1203         next_ctx = next->perf_event_ctxp;
1204         if (parent && next_ctx &&
1205             rcu_dereference(next_ctx->parent_ctx) == parent) {
1206                 /*
1207                  * Looks like the two contexts are clones, so we might be
1208                  * able to optimize the context switch.  We lock both
1209                  * contexts and check that they are clones under the
1210                  * lock (including re-checking that neither has been
1211                  * uncloned in the meantime).  It doesn't matter which
1212                  * order we take the locks because no other cpu could
1213                  * be trying to lock both of these tasks.
1214                  */
1215                 raw_spin_lock(&ctx->lock);
1216                 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1217                 if (context_equiv(ctx, next_ctx)) {
1218                         /*
1219                          * XXX do we need a memory barrier of sorts
1220                          * wrt to rcu_dereference() of perf_event_ctxp
1221                          */
1222                         task->perf_event_ctxp = next_ctx;
1223                         next->perf_event_ctxp = ctx;
1224                         ctx->task = next;
1225                         next_ctx->task = task;
1226                         do_switch = 0;
1227
1228                         perf_event_sync_stat(ctx, next_ctx);
1229                 }
1230                 raw_spin_unlock(&next_ctx->lock);
1231                 raw_spin_unlock(&ctx->lock);
1232         }
1233         rcu_read_unlock();
1234
1235         if (do_switch) {
1236                 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
1237                 cpuctx->task_ctx = NULL;
1238         }
1239 }
1240
1241 static void task_ctx_sched_out(struct perf_event_context *ctx,
1242                                enum event_type_t event_type)
1243 {
1244         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1245
1246         if (!cpuctx->task_ctx)
1247                 return;
1248
1249         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1250                 return;
1251
1252         ctx_sched_out(ctx, cpuctx, event_type);
1253         cpuctx->task_ctx = NULL;
1254 }
1255
1256 /*
1257  * Called with IRQs disabled
1258  */
1259 static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1260 {
1261         task_ctx_sched_out(ctx, EVENT_ALL);
1262 }
1263
1264 /*
1265  * Called with IRQs disabled
1266  */
1267 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1268                               enum event_type_t event_type)
1269 {
1270         ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
1271 }
1272
1273 static void
1274 ctx_pinned_sched_in(struct perf_event_context *ctx,
1275                     struct perf_cpu_context *cpuctx)
1276 {
1277         struct perf_event *event;
1278
1279         list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1280                 if (event->state <= PERF_EVENT_STATE_OFF)
1281                         continue;
1282                 if (event->cpu != -1 && event->cpu != smp_processor_id())
1283                         continue;
1284
1285                 if (group_can_go_on(event, cpuctx, 1))
1286                         group_sched_in(event, cpuctx, ctx);
1287
1288                 /*
1289                  * If this pinned group hasn't been scheduled,
1290                  * put it in error state.
1291                  */
1292                 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1293                         update_group_times(event);
1294                         event->state = PERF_EVENT_STATE_ERROR;
1295                 }
1296         }
1297 }
1298
1299 static void
1300 ctx_flexible_sched_in(struct perf_event_context *ctx,
1301                       struct perf_cpu_context *cpuctx)
1302 {
1303         struct perf_event *event;
1304         int can_add_hw = 1;
1305
1306         list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1307                 /* Ignore events in OFF or ERROR state */
1308                 if (event->state <= PERF_EVENT_STATE_OFF)
1309                         continue;
1310                 /*
1311                  * Listen to the 'cpu' scheduling filter constraint
1312                  * of events:
1313                  */
1314                 if (event->cpu != -1 && event->cpu != smp_processor_id())
1315                         continue;
1316
1317                 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1318                         if (group_sched_in(event, cpuctx, ctx))
1319                                 can_add_hw = 0;
1320                 }
1321         }
1322 }
1323
1324 static void
1325 ctx_sched_in(struct perf_event_context *ctx,
1326              struct perf_cpu_context *cpuctx,
1327              enum event_type_t event_type)
1328 {
1329         raw_spin_lock(&ctx->lock);
1330         ctx->is_active = 1;
1331         if (likely(!ctx->nr_events))
1332                 goto out;
1333
1334         ctx->timestamp = perf_clock();
1335
1336         /*
1337          * First go through the list and put on any pinned groups
1338          * in order to give them the best chance of going on.
1339          */
1340         if (event_type & EVENT_PINNED)
1341                 ctx_pinned_sched_in(ctx, cpuctx);
1342
1343         /* Then walk through the lower prio flexible groups */
1344         if (event_type & EVENT_FLEXIBLE)
1345                 ctx_flexible_sched_in(ctx, cpuctx);
1346
1347 out:
1348         raw_spin_unlock(&ctx->lock);
1349 }
1350
1351 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1352                              enum event_type_t event_type)
1353 {
1354         struct perf_event_context *ctx = &cpuctx->ctx;
1355
1356         ctx_sched_in(ctx, cpuctx, event_type);
1357 }
1358
1359 static void task_ctx_sched_in(struct task_struct *task,
1360                               enum event_type_t event_type)
1361 {
1362         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1363         struct perf_event_context *ctx = task->perf_event_ctxp;
1364
1365         if (likely(!ctx))
1366                 return;
1367         if (cpuctx->task_ctx == ctx)
1368                 return;
1369         ctx_sched_in(ctx, cpuctx, event_type);
1370         cpuctx->task_ctx = ctx;
1371 }
1372 /*
1373  * Called from scheduler to add the events of the current task
1374  * with interrupts disabled.
1375  *
1376  * We restore the event value and then enable it.
1377  *
1378  * This does not protect us against NMI, but enable()
1379  * sets the enabled bit in the control field of event _before_
1380  * accessing the event control register. If a NMI hits, then it will
1381  * keep the event running.
1382  */
1383 void perf_event_task_sched_in(struct task_struct *task)
1384 {
1385         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1386         struct perf_event_context *ctx = task->perf_event_ctxp;
1387
1388         if (likely(!ctx))
1389                 return;
1390
1391         if (cpuctx->task_ctx == ctx)
1392                 return;
1393
1394         /*
1395          * We want to keep the following priority order:
1396          * cpu pinned (that don't need to move), task pinned,
1397          * cpu flexible, task flexible.
1398          */
1399         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1400
1401         ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1402         cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1403         ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1404
1405         cpuctx->task_ctx = ctx;
1406 }
1407
1408 #define MAX_INTERRUPTS (~0ULL)
1409
1410 static void perf_log_throttle(struct perf_event *event, int enable);
1411
1412 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1413 {
1414         u64 frequency = event->attr.sample_freq;
1415         u64 sec = NSEC_PER_SEC;
1416         u64 divisor, dividend;
1417
1418         int count_fls, nsec_fls, frequency_fls, sec_fls;
1419
1420         count_fls = fls64(count);
1421         nsec_fls = fls64(nsec);
1422         frequency_fls = fls64(frequency);
1423         sec_fls = 30;
1424
1425         /*
1426          * We got @count in @nsec, with a target of sample_freq HZ
1427          * the target period becomes:
1428          *
1429          *             @count * 10^9
1430          * period = -------------------
1431          *          @nsec * sample_freq
1432          *
1433          */
1434
1435         /*
1436          * Reduce accuracy by one bit such that @a and @b converge
1437          * to a similar magnitude.
1438          */
1439 #define REDUCE_FLS(a, b)                \
1440 do {                                    \
1441         if (a##_fls > b##_fls) {        \
1442                 a >>= 1;                \
1443                 a##_fls--;              \
1444         } else {                        \
1445                 b >>= 1;                \
1446                 b##_fls--;              \
1447         }                               \
1448 } while (0)
1449
1450         /*
1451          * Reduce accuracy until either term fits in a u64, then proceed with
1452          * the other, so that finally we can do a u64/u64 division.
1453          */
1454         while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1455                 REDUCE_FLS(nsec, frequency);
1456                 REDUCE_FLS(sec, count);
1457         }
1458
1459         if (count_fls + sec_fls > 64) {
1460                 divisor = nsec * frequency;
1461
1462                 while (count_fls + sec_fls > 64) {
1463                         REDUCE_FLS(count, sec);
1464                         divisor >>= 1;
1465                 }
1466
1467                 dividend = count * sec;
1468         } else {
1469                 dividend = count * sec;
1470
1471                 while (nsec_fls + frequency_fls > 64) {
1472                         REDUCE_FLS(nsec, frequency);
1473                         dividend >>= 1;
1474                 }
1475
1476                 divisor = nsec * frequency;
1477         }
1478
1479         if (!divisor)
1480                 return dividend;
1481
1482         return div64_u64(dividend, divisor);
1483 }
1484
1485 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
1486 {
1487         struct hw_perf_event *hwc = &event->hw;
1488         s64 period, sample_period;
1489         s64 delta;
1490
1491         period = perf_calculate_period(event, nsec, count);
1492
1493         delta = (s64)(period - hwc->sample_period);
1494         delta = (delta + 7) / 8; /* low pass filter */
1495
1496         sample_period = hwc->sample_period + delta;
1497
1498         if (!sample_period)
1499                 sample_period = 1;
1500
1501         hwc->sample_period = sample_period;
1502
1503         if (local64_read(&hwc->period_left) > 8*sample_period) {
1504                 event->pmu->stop(event, PERF_EF_UPDATE);
1505                 local64_set(&hwc->period_left, 0);
1506                 event->pmu->start(event, PERF_EF_RELOAD);
1507         }
1508 }
1509
1510 static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1511 {
1512         struct perf_event *event;
1513         struct hw_perf_event *hwc;
1514         u64 interrupts, now;
1515         s64 delta;
1516
1517         raw_spin_lock(&ctx->lock);
1518         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1519                 if (event->state != PERF_EVENT_STATE_ACTIVE)
1520                         continue;
1521
1522                 if (event->cpu != -1 && event->cpu != smp_processor_id())
1523                         continue;
1524
1525                 hwc = &event->hw;
1526
1527                 interrupts = hwc->interrupts;
1528                 hwc->interrupts = 0;
1529
1530                 /*
1531                  * unthrottle events on the tick
1532                  */
1533                 if (interrupts == MAX_INTERRUPTS) {
1534                         perf_log_throttle(event, 1);
1535                         event->pmu->start(event, 0);
1536                 }
1537
1538                 if (!event->attr.freq || !event->attr.sample_freq)
1539                         continue;
1540
1541                 event->pmu->read(event);
1542                 now = local64_read(&event->count);
1543                 delta = now - hwc->freq_count_stamp;
1544                 hwc->freq_count_stamp = now;
1545
1546                 if (delta > 0)
1547                         perf_adjust_period(event, TICK_NSEC, delta);
1548         }
1549         raw_spin_unlock(&ctx->lock);
1550 }
1551
1552 /*
1553  * Round-robin a context's events:
1554  */
1555 static void rotate_ctx(struct perf_event_context *ctx)
1556 {
1557         raw_spin_lock(&ctx->lock);
1558
1559         /* Rotate the first entry last of non-pinned groups */
1560         list_rotate_left(&ctx->flexible_groups);
1561
1562         raw_spin_unlock(&ctx->lock);
1563 }
1564
1565 void perf_event_task_tick(struct task_struct *curr)
1566 {
1567         struct perf_cpu_context *cpuctx;
1568         struct perf_event_context *ctx;
1569         int rotate = 0;
1570
1571         if (!atomic_read(&nr_events))
1572                 return;
1573
1574         cpuctx = &__get_cpu_var(perf_cpu_context);
1575         if (cpuctx->ctx.nr_events &&
1576             cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1577                 rotate = 1;
1578
1579         ctx = curr->perf_event_ctxp;
1580         if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
1581                 rotate = 1;
1582
1583         perf_ctx_adjust_freq(&cpuctx->ctx);
1584         if (ctx)
1585                 perf_ctx_adjust_freq(ctx);
1586
1587         if (!rotate)
1588                 return;
1589
1590         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1591         if (ctx)
1592                 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
1593
1594         rotate_ctx(&cpuctx->ctx);
1595         if (ctx)
1596                 rotate_ctx(ctx);
1597
1598         cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1599         if (ctx)
1600                 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
1601 }
1602
1603 static int event_enable_on_exec(struct perf_event *event,
1604                                 struct perf_event_context *ctx)
1605 {
1606         if (!event->attr.enable_on_exec)
1607                 return 0;
1608
1609         event->attr.enable_on_exec = 0;
1610         if (event->state >= PERF_EVENT_STATE_INACTIVE)
1611                 return 0;
1612
1613         __perf_event_mark_enabled(event, ctx);
1614
1615         return 1;
1616 }
1617
1618 /*
1619  * Enable all of a task's events that have been marked enable-on-exec.
1620  * This expects task == current.
1621  */
1622 static void perf_event_enable_on_exec(struct task_struct *task)
1623 {
1624         struct perf_event_context *ctx;
1625         struct perf_event *event;
1626         unsigned long flags;
1627         int enabled = 0;
1628         int ret;
1629
1630         local_irq_save(flags);
1631         ctx = task->perf_event_ctxp;
1632         if (!ctx || !ctx->nr_events)
1633                 goto out;
1634
1635         __perf_event_task_sched_out(ctx);
1636
1637         raw_spin_lock(&ctx->lock);
1638
1639         list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1640                 ret = event_enable_on_exec(event, ctx);
1641                 if (ret)
1642                         enabled = 1;
1643         }
1644
1645         list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1646                 ret = event_enable_on_exec(event, ctx);
1647                 if (ret)
1648                         enabled = 1;
1649         }
1650
1651         /*
1652          * Unclone this context if we enabled any event.
1653          */
1654         if (enabled)
1655                 unclone_ctx(ctx);
1656
1657         raw_spin_unlock(&ctx->lock);
1658
1659         perf_event_task_sched_in(task);
1660 out:
1661         local_irq_restore(flags);
1662 }
1663
1664 /*
1665  * Cross CPU call to read the hardware event
1666  */
1667 static void __perf_event_read(void *info)
1668 {
1669         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1670         struct perf_event *event = info;
1671         struct perf_event_context *ctx = event->ctx;
1672
1673         /*
1674          * If this is a task context, we need to check whether it is
1675          * the current task context of this cpu.  If not it has been
1676          * scheduled out before the smp call arrived.  In that case
1677          * event->count would have been updated to a recent sample
1678          * when the event was scheduled out.
1679          */
1680         if (ctx->task && cpuctx->task_ctx != ctx)
1681                 return;
1682
1683         raw_spin_lock(&ctx->lock);
1684         update_context_time(ctx);
1685         update_event_times(event);
1686         raw_spin_unlock(&ctx->lock);
1687
1688         event->pmu->read(event);
1689 }
1690
1691 static inline u64 perf_event_count(struct perf_event *event)
1692 {
1693         return local64_read(&event->count) + atomic64_read(&event->child_count);
1694 }
1695
1696 static u64 perf_event_read(struct perf_event *event)
1697 {
1698         /*
1699          * If event is enabled and currently active on a CPU, update the
1700          * value in the event structure:
1701          */
1702         if (event->state == PERF_EVENT_STATE_ACTIVE) {
1703                 smp_call_function_single(event->oncpu,
1704                                          __perf_event_read, event, 1);
1705         } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1706                 struct perf_event_context *ctx = event->ctx;
1707                 unsigned long flags;
1708
1709                 raw_spin_lock_irqsave(&ctx->lock, flags);
1710                 update_context_time(ctx);
1711                 update_event_times(event);
1712                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1713         }
1714
1715         return perf_event_count(event);
1716 }
1717
1718 /*
1719  * Callchain support
1720  */
1721
1722 struct callchain_cpus_entries {
1723         struct rcu_head                 rcu_head;
1724         struct perf_callchain_entry     *cpu_entries[0];
1725 };
1726
1727 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
1728 static atomic_t nr_callchain_events;
1729 static DEFINE_MUTEX(callchain_mutex);
1730 struct callchain_cpus_entries *callchain_cpus_entries;
1731
1732
1733 __weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
1734                                   struct pt_regs *regs)
1735 {
1736 }
1737
1738 __weak void perf_callchain_user(struct perf_callchain_entry *entry,
1739                                 struct pt_regs *regs)
1740 {
1741 }
1742
1743 static void release_callchain_buffers_rcu(struct rcu_head *head)
1744 {
1745         struct callchain_cpus_entries *entries;
1746         int cpu;
1747
1748         entries = container_of(head, struct callchain_cpus_entries, rcu_head);
1749
1750         for_each_possible_cpu(cpu)
1751                 kfree(entries->cpu_entries[cpu]);
1752
1753         kfree(entries);
1754 }
1755
1756 static void release_callchain_buffers(void)
1757 {
1758         struct callchain_cpus_entries *entries;
1759
1760         entries = callchain_cpus_entries;
1761         rcu_assign_pointer(callchain_cpus_entries, NULL);
1762         call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
1763 }
1764
1765 static int alloc_callchain_buffers(void)
1766 {
1767         int cpu;
1768         int size;
1769         struct callchain_cpus_entries *entries;
1770
1771         /*
1772          * We can't use the percpu allocation API for data that can be
1773          * accessed from NMI. Use a temporary manual per cpu allocation
1774          * until that gets sorted out.
1775          */
1776         size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
1777                 num_possible_cpus();
1778
1779         entries = kzalloc(size, GFP_KERNEL);
1780         if (!entries)
1781                 return -ENOMEM;
1782
1783         size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
1784
1785         for_each_possible_cpu(cpu) {
1786                 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
1787                                                          cpu_to_node(cpu));
1788                 if (!entries->cpu_entries[cpu])
1789                         goto fail;
1790         }
1791
1792         rcu_assign_pointer(callchain_cpus_entries, entries);
1793
1794         return 0;
1795
1796 fail:
1797         for_each_possible_cpu(cpu)
1798                 kfree(entries->cpu_entries[cpu]);
1799         kfree(entries);
1800
1801         return -ENOMEM;
1802 }
1803
1804 static int get_callchain_buffers(void)
1805 {
1806         int err = 0;
1807         int count;
1808
1809         mutex_lock(&callchain_mutex);
1810
1811         count = atomic_inc_return(&nr_callchain_events);
1812         if (WARN_ON_ONCE(count < 1)) {
1813                 err = -EINVAL;
1814                 goto exit;
1815         }
1816
1817         if (count > 1) {
1818                 /* If the allocation failed, give up */
1819                 if (!callchain_cpus_entries)
1820                         err = -ENOMEM;
1821                 goto exit;
1822         }
1823
1824         err = alloc_callchain_buffers();
1825         if (err)
1826                 release_callchain_buffers();
1827 exit:
1828         mutex_unlock(&callchain_mutex);
1829
1830         return err;
1831 }
1832
1833 static void put_callchain_buffers(void)
1834 {
1835         if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
1836                 release_callchain_buffers();
1837                 mutex_unlock(&callchain_mutex);
1838         }
1839 }
1840
1841 static int get_recursion_context(int *recursion)
1842 {
1843         int rctx;
1844
1845         if (in_nmi())
1846                 rctx = 3;
1847         else if (in_irq())
1848                 rctx = 2;
1849         else if (in_softirq())
1850                 rctx = 1;
1851         else
1852                 rctx = 0;
1853
1854         if (recursion[rctx])
1855                 return -1;
1856
1857         recursion[rctx]++;
1858         barrier();
1859
1860         return rctx;
1861 }
1862
1863 static inline void put_recursion_context(int *recursion, int rctx)
1864 {
1865         barrier();
1866         recursion[rctx]--;
1867 }
1868
1869 static struct perf_callchain_entry *get_callchain_entry(int *rctx)
1870 {
1871         int cpu;
1872         struct callchain_cpus_entries *entries;
1873
1874         *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
1875         if (*rctx == -1)
1876                 return NULL;
1877
1878         entries = rcu_dereference(callchain_cpus_entries);
1879         if (!entries)
1880                 return NULL;
1881
1882         cpu = smp_processor_id();
1883
1884         return &entries->cpu_entries[cpu][*rctx];
1885 }
1886
1887 static void
1888 put_callchain_entry(int rctx)
1889 {
1890         put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
1891 }
1892
1893 static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1894 {
1895         int rctx;
1896         struct perf_callchain_entry *entry;
1897
1898
1899         entry = get_callchain_entry(&rctx);
1900         if (rctx == -1)
1901                 return NULL;
1902
1903         if (!entry)
1904                 goto exit_put;
1905
1906         entry->nr = 0;
1907
1908         if (!user_mode(regs)) {
1909                 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
1910                 perf_callchain_kernel(entry, regs);
1911                 if (current->mm)
1912                         regs = task_pt_regs(current);
1913                 else
1914                         regs = NULL;
1915         }
1916
1917         if (regs) {
1918                 perf_callchain_store(entry, PERF_CONTEXT_USER);
1919                 perf_callchain_user(entry, regs);
1920         }
1921
1922 exit_put:
1923         put_callchain_entry(rctx);
1924
1925         return entry;
1926 }
1927
1928 /*
1929  * Initialize the perf_event context in a task_struct:
1930  */
1931 static void
1932 __perf_event_init_context(struct perf_event_context *ctx,
1933                             struct task_struct *task)
1934 {
1935         raw_spin_lock_init(&ctx->lock);
1936         mutex_init(&ctx->mutex);
1937         INIT_LIST_HEAD(&ctx->pinned_groups);
1938         INIT_LIST_HEAD(&ctx->flexible_groups);
1939         INIT_LIST_HEAD(&ctx->event_list);
1940         atomic_set(&ctx->refcount, 1);
1941         ctx->task = task;
1942 }
1943
1944 static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1945 {
1946         struct perf_event_context *ctx;
1947         struct perf_cpu_context *cpuctx;
1948         struct task_struct *task;
1949         unsigned long flags;
1950         int err;
1951
1952         if (pid == -1 && cpu != -1) {
1953                 /* Must be root to operate on a CPU event: */
1954                 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1955                         return ERR_PTR(-EACCES);
1956
1957                 if (cpu < 0 || cpu >= nr_cpumask_bits)
1958                         return ERR_PTR(-EINVAL);
1959
1960                 /*
1961                  * We could be clever and allow to attach a event to an
1962                  * offline CPU and activate it when the CPU comes up, but
1963                  * that's for later.
1964                  */
1965                 if (!cpu_online(cpu))
1966                         return ERR_PTR(-ENODEV);
1967
1968                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1969                 ctx = &cpuctx->ctx;
1970                 get_ctx(ctx);
1971
1972                 return ctx;
1973         }
1974
1975         rcu_read_lock();
1976         if (!pid)
1977                 task = current;
1978         else
1979                 task = find_task_by_vpid(pid);
1980         if (task)
1981                 get_task_struct(task);
1982         rcu_read_unlock();
1983
1984         if (!task)
1985                 return ERR_PTR(-ESRCH);
1986
1987         /*
1988          * Can't attach events to a dying task.
1989          */
1990         err = -ESRCH;
1991         if (task->flags & PF_EXITING)
1992                 goto errout;
1993
1994         /* Reuse ptrace permission checks for now. */
1995         err = -EACCES;
1996         if (!ptrace_may_access(task, PTRACE_MODE_READ))
1997                 goto errout;
1998
1999 retry:
2000         ctx = perf_lock_task_context(task, &flags);
2001         if (ctx) {
2002                 unclone_ctx(ctx);
2003                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
2004         }
2005
2006         if (!ctx) {
2007                 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2008                 err = -ENOMEM;
2009                 if (!ctx)
2010                         goto errout;
2011                 __perf_event_init_context(ctx, task);
2012                 get_ctx(ctx);
2013                 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
2014                         /*
2015                          * We raced with some other task; use
2016                          * the context they set.
2017                          */
2018                         kfree(ctx);
2019                         goto retry;
2020                 }
2021                 get_task_struct(task);
2022         }
2023
2024         put_task_struct(task);
2025         return ctx;
2026
2027 errout:
2028         put_task_struct(task);
2029         return ERR_PTR(err);
2030 }
2031
2032 static void perf_event_free_filter(struct perf_event *event);
2033
2034 static void free_event_rcu(struct rcu_head *head)
2035 {
2036         struct perf_event *event;
2037
2038         event = container_of(head, struct perf_event, rcu_head);
2039         if (event->ns)
2040                 put_pid_ns(event->ns);
2041         perf_event_free_filter(event);
2042         kfree(event);
2043 }
2044
2045 static void perf_pending_sync(struct perf_event *event);
2046 static void perf_buffer_put(struct perf_buffer *buffer);
2047
2048 static void free_event(struct perf_event *event)
2049 {
2050         perf_pending_sync(event);
2051
2052         if (!event->parent) {
2053                 atomic_dec(&nr_events);
2054                 if (event->attr.mmap || event->attr.mmap_data)
2055                         atomic_dec(&nr_mmap_events);
2056                 if (event->attr.comm)
2057                         atomic_dec(&nr_comm_events);
2058                 if (event->attr.task)
2059                         atomic_dec(&nr_task_events);
2060                 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2061                         put_callchain_buffers();
2062         }
2063
2064         if (event->buffer) {
2065                 perf_buffer_put(event->buffer);
2066                 event->buffer = NULL;
2067         }
2068
2069         if (event->destroy)
2070                 event->destroy(event);
2071
2072         put_ctx(event->ctx);
2073         call_rcu(&event->rcu_head, free_event_rcu);
2074 }
2075
2076 int perf_event_release_kernel(struct perf_event *event)
2077 {
2078         struct perf_event_context *ctx = event->ctx;
2079
2080         /*
2081          * Remove from the PMU, can't get re-enabled since we got
2082          * here because the last ref went.
2083          */
2084         perf_event_disable(event);
2085
2086         WARN_ON_ONCE(ctx->parent_ctx);
2087         /*
2088          * There are two ways this annotation is useful:
2089          *
2090          *  1) there is a lock recursion from perf_event_exit_task
2091          *     see the comment there.
2092          *
2093          *  2) there is a lock-inversion with mmap_sem through
2094          *     perf_event_read_group(), which takes faults while
2095          *     holding ctx->mutex, however this is called after
2096          *     the last filedesc died, so there is no possibility
2097          *     to trigger the AB-BA case.
2098          */
2099         mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2100         raw_spin_lock_irq(&ctx->lock);
2101         perf_group_detach(event);
2102         list_del_event(event, ctx);
2103         raw_spin_unlock_irq(&ctx->lock);
2104         mutex_unlock(&ctx->mutex);
2105
2106         mutex_lock(&event->owner->perf_event_mutex);
2107         list_del_init(&event->owner_entry);
2108         mutex_unlock(&event->owner->perf_event_mutex);
2109         put_task_struct(event->owner);
2110
2111         free_event(event);
2112
2113         return 0;
2114 }
2115 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2116
2117 /*
2118  * Called when the last reference to the file is gone.
2119  */
2120 static int perf_release(struct inode *inode, struct file *file)
2121 {
2122         struct perf_event *event = file->private_data;
2123
2124         file->private_data = NULL;
2125
2126         return perf_event_release_kernel(event);
2127 }
2128
2129 static int perf_event_read_size(struct perf_event *event)
2130 {
2131         int entry = sizeof(u64); /* value */
2132         int size = 0;
2133         int nr = 1;
2134
2135         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2136                 size += sizeof(u64);
2137
2138         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2139                 size += sizeof(u64);
2140
2141         if (event->attr.read_format & PERF_FORMAT_ID)
2142                 entry += sizeof(u64);
2143
2144         if (event->attr.read_format & PERF_FORMAT_GROUP) {
2145                 nr += event->group_leader->nr_siblings;
2146                 size += sizeof(u64);
2147         }
2148
2149         size += entry * nr;
2150
2151         return size;
2152 }
2153
2154 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
2155 {
2156         struct perf_event *child;
2157         u64 total = 0;
2158
2159         *enabled = 0;
2160         *running = 0;
2161
2162         mutex_lock(&event->child_mutex);
2163         total += perf_event_read(event);
2164         *enabled += event->total_time_enabled +
2165                         atomic64_read(&event->child_total_time_enabled);
2166         *running += event->total_time_running +
2167                         atomic64_read(&event->child_total_time_running);
2168
2169         list_for_each_entry(child, &event->child_list, child_list) {
2170                 total += perf_event_read(child);
2171                 *enabled += child->total_time_enabled;
2172                 *running += child->total_time_running;
2173         }
2174         mutex_unlock(&event->child_mutex);
2175
2176         return total;
2177 }
2178 EXPORT_SYMBOL_GPL(perf_event_read_value);
2179
2180 static int perf_event_read_group(struct perf_event *event,
2181                                    u64 read_format, char __user *buf)
2182 {
2183         struct perf_event *leader = event->group_leader, *sub;
2184         int n = 0, size = 0, ret = -EFAULT;
2185         struct perf_event_context *ctx = leader->ctx;
2186         u64 values[5];
2187         u64 count, enabled, running;
2188
2189         mutex_lock(&ctx->mutex);
2190         count = perf_event_read_value(leader, &enabled, &running);
2191
2192         values[n++] = 1 + leader->nr_siblings;
2193         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2194                 values[n++] = enabled;
2195         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2196                 values[n++] = running;
2197         values[n++] = count;
2198         if (read_format & PERF_FORMAT_ID)
2199                 values[n++] = primary_event_id(leader);
2200
2201         size = n * sizeof(u64);
2202
2203         if (copy_to_user(buf, values, size))
2204                 goto unlock;
2205
2206         ret = size;
2207
2208         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2209                 n = 0;
2210
2211                 values[n++] = perf_event_read_value(sub, &enabled, &running);
2212                 if (read_format & PERF_FORMAT_ID)
2213                         values[n++] = primary_event_id(sub);
2214
2215                 size = n * sizeof(u64);
2216
2217                 if (copy_to_user(buf + ret, values, size)) {
2218                         ret = -EFAULT;
2219                         goto unlock;
2220                 }
2221
2222                 ret += size;
2223         }
2224 unlock:
2225         mutex_unlock(&ctx->mutex);
2226
2227         return ret;
2228 }
2229
2230 static int perf_event_read_one(struct perf_event *event,
2231                                  u64 read_format, char __user *buf)
2232 {
2233         u64 enabled, running;
2234         u64 values[4];
2235         int n = 0;
2236
2237         values[n++] = perf_event_read_value(event, &enabled, &running);
2238         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2239                 values[n++] = enabled;
2240         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2241                 values[n++] = running;
2242         if (read_format & PERF_FORMAT_ID)
2243                 values[n++] = primary_event_id(event);
2244
2245         if (copy_to_user(buf, values, n * sizeof(u64)))
2246                 return -EFAULT;
2247
2248         return n * sizeof(u64);
2249 }
2250
2251 /*
2252  * Read the performance event - simple non blocking version for now
2253  */
2254 static ssize_t
2255 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2256 {
2257         u64 read_format = event->attr.read_format;
2258         int ret;
2259
2260         /*
2261          * Return end-of-file for a read on a event that is in
2262          * error state (i.e. because it was pinned but it couldn't be
2263          * scheduled on to the CPU at some point).
2264          */
2265         if (event->state == PERF_EVENT_STATE_ERROR)
2266                 return 0;
2267
2268         if (count < perf_event_read_size(event))
2269                 return -ENOSPC;
2270
2271         WARN_ON_ONCE(event->ctx->parent_ctx);
2272         if (read_format & PERF_FORMAT_GROUP)
2273                 ret = perf_event_read_group(event, read_format, buf);
2274         else
2275                 ret = perf_event_read_one(event, read_format, buf);
2276
2277         return ret;
2278 }
2279
2280 static ssize_t
2281 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2282 {
2283         struct perf_event *event = file->private_data;
2284
2285         return perf_read_hw(event, buf, count);
2286 }
2287
2288 static unsigned int perf_poll(struct file *file, poll_table *wait)
2289 {
2290         struct perf_event *event = file->private_data;
2291         struct perf_buffer *buffer;
2292         unsigned int events = POLL_HUP;
2293
2294         rcu_read_lock();
2295         buffer = rcu_dereference(event->buffer);
2296         if (buffer)
2297                 events = atomic_xchg(&buffer->poll, 0);
2298         rcu_read_unlock();
2299
2300         poll_wait(file, &event->waitq, wait);
2301
2302         return events;
2303 }
2304
2305 static void perf_event_reset(struct perf_event *event)
2306 {
2307         (void)perf_event_read(event);
2308         local64_set(&event->count, 0);
2309         perf_event_update_userpage(event);
2310 }
2311
2312 /*
2313  * Holding the top-level event's child_mutex means that any
2314  * descendant process that has inherited this event will block
2315  * in sync_child_event if it goes to exit, thus satisfying the
2316  * task existence requirements of perf_event_enable/disable.
2317  */
2318 static void perf_event_for_each_child(struct perf_event *event,
2319                                         void (*func)(struct perf_event *))
2320 {
2321         struct perf_event *child;
2322
2323         WARN_ON_ONCE(event->ctx->parent_ctx);
2324         mutex_lock(&event->child_mutex);
2325         func(event);
2326         list_for_each_entry(child, &event->child_list, child_list)
2327                 func(child);
2328         mutex_unlock(&event->child_mutex);
2329 }
2330
2331 static void perf_event_for_each(struct perf_event *event,
2332                                   void (*func)(struct perf_event *))
2333 {
2334         struct perf_event_context *ctx = event->ctx;
2335         struct perf_event *sibling;
2336
2337         WARN_ON_ONCE(ctx->parent_ctx);
2338         mutex_lock(&ctx->mutex);
2339         event = event->group_leader;
2340
2341         perf_event_for_each_child(event, func);
2342         func(event);
2343         list_for_each_entry(sibling, &event->sibling_list, group_entry)
2344                 perf_event_for_each_child(event, func);
2345         mutex_unlock(&ctx->mutex);
2346 }
2347
2348 static int perf_event_period(struct perf_event *event, u64 __user *arg)
2349 {
2350         struct perf_event_context *ctx = event->ctx;
2351         unsigned long size;
2352         int ret = 0;
2353         u64 value;
2354
2355         if (!event->attr.sample_period)
2356                 return -EINVAL;
2357
2358         size = copy_from_user(&value, arg, sizeof(value));
2359         if (size != sizeof(value))
2360                 return -EFAULT;
2361
2362         if (!value)
2363                 return -EINVAL;
2364
2365         raw_spin_lock_irq(&ctx->lock);
2366         if (event->attr.freq) {
2367                 if (value > sysctl_perf_event_sample_rate) {
2368                         ret = -EINVAL;
2369                         goto unlock;
2370                 }
2371
2372                 event->attr.sample_freq = value;
2373         } else {
2374                 event->attr.sample_period = value;
2375                 event->hw.sample_period = value;
2376         }
2377 unlock:
2378         raw_spin_unlock_irq(&ctx->lock);
2379
2380         return ret;
2381 }
2382
2383 static const struct file_operations perf_fops;
2384
2385 static struct perf_event *perf_fget_light(int fd, int *fput_needed)
2386 {
2387         struct file *file;
2388
2389         file = fget_light(fd, fput_needed);
2390         if (!file)
2391                 return ERR_PTR(-EBADF);
2392
2393         if (file->f_op != &perf_fops) {
2394                 fput_light(file, *fput_needed);
2395                 *fput_needed = 0;
2396                 return ERR_PTR(-EBADF);
2397         }
2398
2399         return file->private_data;
2400 }
2401
2402 static int perf_event_set_output(struct perf_event *event,
2403                                  struct perf_event *output_event);
2404 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2405
2406 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2407 {
2408         struct perf_event *event = file->private_data;
2409         void (*func)(struct perf_event *);
2410         u32 flags = arg;
2411
2412         switch (cmd) {
2413         case PERF_EVENT_IOC_ENABLE:
2414                 func = perf_event_enable;
2415                 break;
2416         case PERF_EVENT_IOC_DISABLE:
2417                 func = perf_event_disable;
2418                 break;
2419         case PERF_EVENT_IOC_RESET:
2420                 func = perf_event_reset;
2421                 break;
2422
2423         case PERF_EVENT_IOC_REFRESH:
2424                 return perf_event_refresh(event, arg);
2425
2426         case PERF_EVENT_IOC_PERIOD:
2427                 return perf_event_period(event, (u64 __user *)arg);
2428
2429         case PERF_EVENT_IOC_SET_OUTPUT:
2430         {
2431                 struct perf_event *output_event = NULL;
2432                 int fput_needed = 0;
2433                 int ret;
2434
2435                 if (arg != -1) {
2436                         output_event = perf_fget_light(arg, &fput_needed);
2437                         if (IS_ERR(output_event))
2438                                 return PTR_ERR(output_event);
2439                 }
2440
2441                 ret = perf_event_set_output(event, output_event);
2442                 if (output_event)
2443                         fput_light(output_event->filp, fput_needed);
2444
2445                 return ret;
2446         }
2447
2448         case PERF_EVENT_IOC_SET_FILTER:
2449                 return perf_event_set_filter(event, (void __user *)arg);
2450
2451         default:
2452                 return -ENOTTY;
2453         }
2454
2455         if (flags & PERF_IOC_FLAG_GROUP)
2456                 perf_event_for_each(event, func);
2457         else
2458                 perf_event_for_each_child(event, func);
2459
2460         return 0;
2461 }
2462
2463 int perf_event_task_enable(void)
2464 {
2465         struct perf_event *event;
2466
2467         mutex_lock(&current->perf_event_mutex);
2468         list_for_each_entry(event, &current->perf_event_list, owner_entry)
2469                 perf_event_for_each_child(event, perf_event_enable);
2470         mutex_unlock(&current->perf_event_mutex);
2471
2472         return 0;
2473 }
2474
2475 int perf_event_task_disable(void)
2476 {
2477         struct perf_event *event;
2478
2479         mutex_lock(&current->perf_event_mutex);
2480         list_for_each_entry(event, &current->perf_event_list, owner_entry)
2481                 perf_event_for_each_child(event, perf_event_disable);
2482         mutex_unlock(&current->perf_event_mutex);
2483
2484         return 0;
2485 }
2486
2487 #ifndef PERF_EVENT_INDEX_OFFSET
2488 # define PERF_EVENT_INDEX_OFFSET 0
2489 #endif
2490
2491 static int perf_event_index(struct perf_event *event)
2492 {
2493         if (event->hw.state & PERF_HES_STOPPED)
2494                 return 0;
2495
2496         if (event->state != PERF_EVENT_STATE_ACTIVE)
2497                 return 0;
2498
2499         return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2500 }
2501
2502 /*
2503  * Callers need to ensure there can be no nesting of this function, otherwise
2504  * the seqlock logic goes bad. We can not serialize this because the arch
2505  * code calls this from NMI context.
2506  */
2507 void perf_event_update_userpage(struct perf_event *event)
2508 {
2509         struct perf_event_mmap_page *userpg;
2510         struct perf_buffer *buffer;
2511
2512         rcu_read_lock();
2513         buffer = rcu_dereference(event->buffer);
2514         if (!buffer)
2515                 goto unlock;
2516
2517         userpg = buffer->user_page;
2518
2519         /*
2520          * Disable preemption so as to not let the corresponding user-space
2521          * spin too long if we get preempted.
2522          */
2523         preempt_disable();
2524         ++userpg->lock;
2525         barrier();
2526         userpg->index = perf_event_index(event);
2527         userpg->offset = perf_event_count(event);
2528         if (event->state == PERF_EVENT_STATE_ACTIVE)
2529                 userpg->offset -= local64_read(&event->hw.prev_count);
2530
2531         userpg->time_enabled = event->total_time_enabled +
2532                         atomic64_read(&event->child_total_time_enabled);
2533
2534         userpg->time_running = event->total_time_running +
2535                         atomic64_read(&event->child_total_time_running);
2536
2537         barrier();
2538         ++userpg->lock;
2539         preempt_enable();
2540 unlock:
2541         rcu_read_unlock();
2542 }
2543
2544 static unsigned long perf_data_size(struct perf_buffer *buffer);
2545
2546 static void
2547 perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
2548 {
2549         long max_size = perf_data_size(buffer);
2550
2551         if (watermark)
2552                 buffer->watermark = min(max_size, watermark);
2553
2554         if (!buffer->watermark)
2555                 buffer->watermark = max_size / 2;
2556
2557         if (flags & PERF_BUFFER_WRITABLE)
2558                 buffer->writable = 1;
2559
2560         atomic_set(&buffer->refcount, 1);
2561 }
2562
2563 #ifndef CONFIG_PERF_USE_VMALLOC
2564
2565 /*
2566  * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2567  */
2568
2569 static struct page *
2570 perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
2571 {
2572         if (pgoff > buffer->nr_pages)
2573                 return NULL;
2574
2575         if (pgoff == 0)
2576                 return virt_to_page(buffer->user_page);
2577
2578         return virt_to_page(buffer->data_pages[pgoff - 1]);
2579 }
2580
2581 static void *perf_mmap_alloc_page(int cpu)
2582 {
2583         struct page *page;
2584         int node;
2585
2586         node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2587         page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2588         if (!page)
2589                 return NULL;
2590
2591         return page_address(page);
2592 }
2593
2594 static struct perf_buffer *
2595 perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
2596 {
2597         struct perf_buffer *buffer;
2598         unsigned long size;
2599         int i;
2600
2601         size = sizeof(struct perf_buffer);
2602         size += nr_pages * sizeof(void *);
2603
2604         buffer = kzalloc(size, GFP_KERNEL);
2605         if (!buffer)
2606                 goto fail;
2607
2608         buffer->user_page = perf_mmap_alloc_page(cpu);
2609         if (!buffer->user_page)
2610                 goto fail_user_page;
2611
2612         for (i = 0; i < nr_pages; i++) {
2613                 buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
2614                 if (!buffer->data_pages[i])
2615                         goto fail_data_pages;
2616         }
2617
2618         buffer->nr_pages = nr_pages;
2619
2620         perf_buffer_init(buffer, watermark, flags);
2621
2622         return buffer;
2623
2624 fail_data_pages:
2625         for (i--; i >= 0; i--)
2626                 free_page((unsigned long)buffer->data_pages[i]);
2627
2628         free_page((unsigned long)buffer->user_page);
2629
2630 fail_user_page:
2631         kfree(buffer);
2632
2633 fail:
2634         return NULL;
2635 }
2636
2637 static void perf_mmap_free_page(unsigned long addr)
2638 {
2639         struct page *page = virt_to_page((void *)addr);
2640
2641         page->mapping = NULL;
2642         __free_page(page);
2643 }
2644
2645 static void perf_buffer_free(struct perf_buffer *buffer)
2646 {
2647         int i;
2648
2649         perf_mmap_free_page((unsigned long)buffer->user_page);
2650         for (i = 0; i < buffer->nr_pages; i++)
2651                 perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
2652         kfree(buffer);
2653 }
2654
2655 static inline int page_order(struct perf_buffer *buffer)
2656 {
2657         return 0;
2658 }
2659
2660 #else
2661
2662 /*
2663  * Back perf_mmap() with vmalloc memory.
2664  *
2665  * Required for architectures that have d-cache aliasing issues.
2666  */
2667
2668 static inline int page_order(struct perf_buffer *buffer)
2669 {
2670         return buffer->page_order;
2671 }
2672
2673 static struct page *
2674 perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
2675 {
2676         if (pgoff > (1UL << page_order(buffer)))
2677                 return NULL;
2678
2679         return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
2680 }
2681
2682 static void perf_mmap_unmark_page(void *addr)
2683 {
2684         struct page *page = vmalloc_to_page(addr);
2685
2686         page->mapping = NULL;
2687 }
2688
2689 static void perf_buffer_free_work(struct work_struct *work)
2690 {
2691         struct perf_buffer *buffer;
2692         void *base;
2693         int i, nr;
2694
2695         buffer = container_of(work, struct perf_buffer, work);
2696         nr = 1 << page_order(buffer);
2697
2698         base = buffer->user_page;
2699         for (i = 0; i < nr + 1; i++)
2700                 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2701
2702         vfree(base);
2703         kfree(buffer);
2704 }
2705
2706 static void perf_buffer_free(struct perf_buffer *buffer)
2707 {
2708         schedule_work(&buffer->work);
2709 }
2710
2711 static struct perf_buffer *
2712 perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
2713 {
2714         struct perf_buffer *buffer;
2715         unsigned long size;
2716         void *all_buf;
2717
2718         size = sizeof(struct perf_buffer);
2719         size += sizeof(void *);
2720
2721         buffer = kzalloc(size, GFP_KERNEL);
2722         if (!buffer)
2723                 goto fail;
2724
2725         INIT_WORK(&buffer->work, perf_buffer_free_work);
2726
2727         all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2728         if (!all_buf)
2729                 goto fail_all_buf;
2730
2731         buffer->user_page = all_buf;
2732         buffer->data_pages[0] = all_buf + PAGE_SIZE;
2733         buffer->page_order = ilog2(nr_pages);
2734         buffer->nr_pages = 1;
2735
2736         perf_buffer_init(buffer, watermark, flags);
2737
2738         return buffer;
2739
2740 fail_all_buf:
2741         kfree(buffer);
2742
2743 fail:
2744         return NULL;
2745 }
2746
2747 #endif
2748
2749 static unsigned long perf_data_size(struct perf_buffer *buffer)
2750 {
2751         return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
2752 }
2753
2754 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2755 {
2756         struct perf_event *event = vma->vm_file->private_data;
2757         struct perf_buffer *buffer;
2758         int ret = VM_FAULT_SIGBUS;
2759
2760         if (vmf->flags & FAULT_FLAG_MKWRITE) {
2761                 if (vmf->pgoff == 0)
2762                         ret = 0;
2763                 return ret;
2764         }
2765
2766         rcu_read_lock();
2767         buffer = rcu_dereference(event->buffer);
2768         if (!buffer)
2769                 goto unlock;
2770
2771         if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2772                 goto unlock;
2773
2774         vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
2775         if (!vmf->page)
2776                 goto unlock;
2777
2778         get_page(vmf->page);
2779         vmf->page->mapping = vma->vm_file->f_mapping;
2780         vmf->page->index   = vmf->pgoff;
2781
2782         ret = 0;
2783 unlock:
2784         rcu_read_unlock();
2785
2786         return ret;
2787 }
2788
2789 static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
2790 {
2791         struct perf_buffer *buffer;
2792
2793         buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
2794         perf_buffer_free(buffer);
2795 }
2796
2797 static struct perf_buffer *perf_buffer_get(struct perf_event *event)
2798 {
2799         struct perf_buffer *buffer;
2800
2801         rcu_read_lock();
2802         buffer = rcu_dereference(event->buffer);
2803         if (buffer) {
2804                 if (!atomic_inc_not_zero(&buffer->refcount))
2805                         buffer = NULL;
2806         }
2807         rcu_read_unlock();
2808
2809         return buffer;
2810 }
2811
2812 static void perf_buffer_put(struct perf_buffer *buffer)
2813 {
2814         if (!atomic_dec_and_test(&buffer->refcount))
2815                 return;
2816
2817         call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
2818 }
2819
2820 static void perf_mmap_open(struct vm_area_struct *vma)
2821 {
2822         struct perf_event *event = vma->vm_file->private_data;
2823
2824         atomic_inc(&event->mmap_count);
2825 }
2826
2827 static void perf_mmap_close(struct vm_area_struct *vma)
2828 {
2829         struct perf_event *event = vma->vm_file->private_data;
2830
2831         if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2832                 unsigned long size = perf_data_size(event->buffer);
2833                 struct user_struct *user = event->mmap_user;
2834                 struct perf_buffer *buffer = event->buffer;
2835
2836                 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2837                 vma->vm_mm->locked_vm -= event->mmap_locked;
2838                 rcu_assign_pointer(event->buffer, NULL);
2839                 mutex_unlock(&event->mmap_mutex);
2840
2841                 perf_buffer_put(buffer);
2842                 free_uid(user);
2843         }
2844 }
2845
2846 static const struct vm_operations_struct perf_mmap_vmops = {
2847         .open           = perf_mmap_open,
2848         .close          = perf_mmap_close,
2849         .fault          = perf_mmap_fault,
2850         .page_mkwrite   = perf_mmap_fault,
2851 };
2852
2853 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2854 {
2855         struct perf_event *event = file->private_data;
2856         unsigned long user_locked, user_lock_limit;
2857         struct user_struct *user = current_user();
2858         unsigned long locked, lock_limit;
2859         struct perf_buffer *buffer;
2860         unsigned long vma_size;
2861         unsigned long nr_pages;
2862         long user_extra, extra;
2863         int ret = 0, flags = 0;
2864
2865         /*
2866          * Don't allow mmap() of inherited per-task counters. This would
2867          * create a performance issue due to all children writing to the
2868          * same buffer.
2869          */
2870         if (event->cpu == -1 && event->attr.inherit)
2871                 return -EINVAL;
2872
2873         if (!(vma->vm_flags & VM_SHARED))
2874                 return -EINVAL;
2875
2876         vma_size = vma->vm_end - vma->vm_start;
2877         nr_pages = (vma_size / PAGE_SIZE) - 1;
2878
2879         /*
2880          * If we have buffer pages ensure they're a power-of-two number, so we
2881          * can do bitmasks instead of modulo.
2882          */
2883         if (nr_pages != 0 && !is_power_of_2(nr_pages))
2884                 return -EINVAL;
2885
2886         if (vma_size != PAGE_SIZE * (1 + nr_pages))
2887                 return -EINVAL;
2888
2889         if (vma->vm_pgoff != 0)
2890                 return -EINVAL;
2891
2892         WARN_ON_ONCE(event->ctx->parent_ctx);
2893         mutex_lock(&event->mmap_mutex);
2894         if (event->buffer) {
2895                 if (event->buffer->nr_pages == nr_pages)
2896                         atomic_inc(&event->buffer->refcount);
2897                 else
2898                         ret = -EINVAL;
2899                 goto unlock;
2900         }
2901
2902         user_extra = nr_pages + 1;
2903         user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2904
2905         /*
2906          * Increase the limit linearly with more CPUs:
2907          */
2908         user_lock_limit *= num_online_cpus();
2909
2910         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2911
2912         extra = 0;
2913         if (user_locked > user_lock_limit)
2914                 extra = user_locked - user_lock_limit;
2915
2916         lock_limit = rlimit(RLIMIT_MEMLOCK);
2917         lock_limit >>= PAGE_SHIFT;
2918         locked = vma->vm_mm->locked_vm + extra;
2919
2920         if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2921                 !capable(CAP_IPC_LOCK)) {
2922                 ret = -EPERM;
2923                 goto unlock;
2924         }
2925
2926         WARN_ON(event->buffer);
2927
2928         if (vma->vm_flags & VM_WRITE)
2929                 flags |= PERF_BUFFER_WRITABLE;
2930
2931         buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
2932                                    event->cpu, flags);
2933         if (!buffer) {
2934                 ret = -ENOMEM;
2935                 goto unlock;
2936         }
2937         rcu_assign_pointer(event->buffer, buffer);
2938
2939         atomic_long_add(user_extra, &user->locked_vm);
2940         event->mmap_locked = extra;
2941         event->mmap_user = get_current_user();
2942         vma->vm_mm->locked_vm += event->mmap_locked;
2943
2944 unlock:
2945         if (!ret)
2946                 atomic_inc(&event->mmap_count);
2947         mutex_unlock(&event->mmap_mutex);
2948
2949         vma->vm_flags |= VM_RESERVED;
2950         vma->vm_ops = &perf_mmap_vmops;
2951
2952         return ret;
2953 }
2954
2955 static int perf_fasync(int fd, struct file *filp, int on)
2956 {
2957         struct inode *inode = filp->f_path.dentry->d_inode;
2958         struct perf_event *event = filp->private_data;
2959         int retval;
2960
2961         mutex_lock(&inode->i_mutex);
2962         retval = fasync_helper(fd, filp, on, &event->fasync);
2963         mutex_unlock(&inode->i_mutex);
2964
2965         if (retval < 0)
2966                 return retval;
2967
2968         return 0;
2969 }
2970
2971 static const struct file_operations perf_fops = {
2972         .llseek                 = no_llseek,
2973         .release                = perf_release,
2974         .read                   = perf_read,
2975         .poll                   = perf_poll,
2976         .unlocked_ioctl         = perf_ioctl,
2977         .compat_ioctl           = perf_ioctl,
2978         .mmap                   = perf_mmap,
2979         .fasync                 = perf_fasync,
2980 };
2981
2982 /*
2983  * Perf event wakeup
2984  *
2985  * If there's data, ensure we set the poll() state and publish everything
2986  * to user-space before waking everybody up.
2987  */
2988
2989 void perf_event_wakeup(struct perf_event *event)
2990 {
2991         wake_up_all(&event->waitq);
2992
2993         if (event->pending_kill) {
2994                 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2995                 event->pending_kill = 0;
2996         }
2997 }
2998
2999 /*
3000  * Pending wakeups
3001  *
3002  * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
3003  *
3004  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
3005  * single linked list and use cmpxchg() to add entries lockless.
3006  */
3007
3008 static void perf_pending_event(struct perf_pending_entry *entry)
3009 {
3010         struct perf_event *event = container_of(entry,
3011                         struct perf_event, pending);
3012
3013         if (event->pending_disable) {
3014                 event->pending_disable = 0;
3015                 __perf_event_disable(event);
3016         }
3017
3018         if (event->pending_wakeup) {
3019                 event->pending_wakeup = 0;
3020                 perf_event_wakeup(event);
3021         }
3022 }
3023
3024 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
3025
3026 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
3027         PENDING_TAIL,
3028 };
3029
3030 static void perf_pending_queue(struct perf_pending_entry *entry,
3031                                void (*func)(struct perf_pending_entry *))
3032 {
3033         struct perf_pending_entry **head;
3034
3035         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
3036                 return;
3037
3038         entry->func = func;
3039
3040         head = &get_cpu_var(perf_pending_head);
3041
3042         do {
3043                 entry->next = *head;
3044         } while (cmpxchg(head, entry->next, entry) != entry->next);
3045
3046         set_perf_event_pending();
3047
3048         put_cpu_var(perf_pending_head);
3049 }
3050
3051 static int __perf_pending_run(void)
3052 {
3053         struct perf_pending_entry *list;
3054         int nr = 0;
3055
3056         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
3057         while (list != PENDING_TAIL) {
3058                 void (*func)(struct perf_pending_entry *);
3059                 struct perf_pending_entry *entry = list;
3060
3061                 list = list->next;
3062
3063                 func = entry->func;
3064                 entry->next = NULL;
3065                 /*
3066                  * Ensure we observe the unqueue before we issue the wakeup,
3067                  * so that we won't be waiting forever.
3068                  * -- see perf_not_pending().
3069                  */
3070                 smp_wmb();
3071
3072                 func(entry);
3073                 nr++;
3074         }
3075
3076         return nr;
3077 }
3078
3079 static inline int perf_not_pending(struct perf_event *event)
3080 {
3081         /*
3082          * If we flush on whatever cpu we run, there is a chance we don't
3083          * need to wait.
3084          */
3085         get_cpu();
3086         __perf_pending_run();
3087         put_cpu();
3088
3089         /*
3090          * Ensure we see the proper queue state before going to sleep
3091          * so that we do not miss the wakeup. -- see perf_pending_handle()
3092          */
3093         smp_rmb();
3094         return event->pending.next == NULL;
3095 }
3096
3097 static void perf_pending_sync(struct perf_event *event)
3098 {
3099         wait_event(event->waitq, perf_not_pending(event));
3100 }
3101
3102 void perf_event_do_pending(void)
3103 {
3104         __perf_pending_run();
3105 }
3106
3107 /*
3108  * We assume there is only KVM supporting the callbacks.
3109  * Later on, we might change it to a list if there is
3110  * another virtualization implementation supporting the callbacks.
3111  */
3112 struct perf_guest_info_callbacks *perf_guest_cbs;
3113
3114 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3115 {
3116         perf_guest_cbs = cbs;
3117         return 0;
3118 }
3119 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3120
3121 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3122 {
3123         perf_guest_cbs = NULL;
3124         return 0;
3125 }
3126 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3127
3128 /*
3129  * Output
3130  */
3131 static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
3132                               unsigned long offset, unsigned long head)
3133 {
3134         unsigned long mask;
3135
3136         if (!buffer->writable)
3137                 return true;
3138
3139         mask = perf_data_size(buffer) - 1;
3140
3141         offset = (offset - tail) & mask;
3142         head   = (head   - tail) & mask;
3143
3144         if ((int)(head - offset) < 0)
3145                 return false;
3146
3147         return true;
3148 }
3149
3150 static void perf_output_wakeup(struct perf_output_handle *handle)
3151 {
3152         atomic_set(&handle->buffer->poll, POLL_IN);
3153
3154         if (handle->nmi) {
3155                 handle->event->pending_wakeup = 1;
3156                 perf_pending_queue(&handle->event->pending,
3157                                    perf_pending_event);
3158         } else
3159                 perf_event_wakeup(handle->event);
3160 }
3161
3162 /*
3163  * We need to ensure a later event_id doesn't publish a head when a former
3164  * event isn't done writing. However since we need to deal with NMIs we
3165  * cannot fully serialize things.
3166  *
3167  * We only publish the head (and generate a wakeup) when the outer-most
3168  * event completes.
3169  */
3170 static void perf_output_get_handle(struct perf_output_handle *handle)
3171 {
3172         struct perf_buffer *buffer = handle->buffer;
3173
3174         preempt_disable();
3175         local_inc(&buffer->nest);
3176         handle->wakeup = local_read(&buffer->wakeup);
3177 }
3178
3179 static void perf_output_put_handle(struct perf_output_handle *handle)
3180 {
3181         struct perf_buffer *buffer = handle->buffer;
3182         unsigned long head;
3183
3184 again:
3185         head = local_read(&buffer->head);
3186
3187         /*
3188          * IRQ/NMI can happen here, which means we can miss a head update.
3189          */
3190
3191         if (!local_dec_and_test(&buffer->nest))
3192                 goto out;
3193
3194         /*
3195          * Publish the known good head. Rely on the full barrier implied
3196          * by atomic_dec_and_test() order the buffer->head read and this
3197          * write.
3198          */
3199         buffer->user_page->data_head = head;
3200
3201         /*
3202          * Now check if we missed an update, rely on the (compiler)
3203          * barrier in atomic_dec_and_test() to re-read buffer->head.
3204          */
3205         if (unlikely(head != local_read(&buffer->head))) {
3206                 local_inc(&buffer->nest);
3207                 goto again;
3208         }
3209
3210         if (handle->wakeup != local_read(&buffer->wakeup))
3211                 perf_output_wakeup(handle);
3212
3213 out:
3214         preempt_enable();
3215 }
3216
3217 __always_inline void perf_output_copy(struct perf_output_handle *handle,
3218                       const void *buf, unsigned int len)
3219 {
3220         do {
3221                 unsigned long size = min_t(unsigned long, handle->size, len);
3222
3223                 memcpy(handle->addr, buf, size);
3224
3225                 len -= size;
3226                 handle->addr += size;
3227                 buf += size;
3228                 handle->size -= size;
3229                 if (!handle->size) {
3230                         struct perf_buffer *buffer = handle->buffer;
3231
3232                         handle->page++;
3233                         handle->page &= buffer->nr_pages - 1;
3234                         handle->addr = buffer->data_pages[handle->page];
3235                         handle->size = PAGE_SIZE << page_order(buffer);
3236                 }
3237         } while (len);
3238 }
3239
3240 int perf_output_begin(struct perf_output_handle *handle,
3241                       struct perf_event *event, unsigned int size,
3242                       int nmi, int sample)
3243 {
3244         struct perf_buffer *buffer;
3245         unsigned long tail, offset, head;
3246         int have_lost;
3247         struct {
3248                 struct perf_event_header header;
3249                 u64                      id;
3250                 u64                      lost;
3251         } lost_event;
3252
3253         rcu_read_lock();
3254         /*
3255          * For inherited events we send all the output towards the parent.
3256          */
3257         if (event->parent)
3258                 event = event->parent;
3259
3260         buffer = rcu_dereference(event->buffer);
3261         if (!buffer)
3262                 goto out;
3263
3264         handle->buffer  = buffer;
3265         handle->event   = event;
3266         handle->nmi     = nmi;
3267         handle->sample  = sample;
3268
3269         if (!buffer->nr_pages)
3270                 goto out;
3271
3272         have_lost = local_read(&buffer->lost);
3273         if (have_lost)
3274                 size += sizeof(lost_event);
3275
3276         perf_output_get_handle(handle);
3277
3278         do {
3279                 /*
3280                  * Userspace could choose to issue a mb() before updating the
3281                  * tail pointer. So that all reads will be completed before the
3282                  * write is issued.
3283                  */
3284                 tail = ACCESS_ONCE(buffer->user_page->data_tail);
3285                 smp_rmb();
3286                 offset = head = local_read(&buffer->head);
3287                 head += size;
3288                 if (unlikely(!perf_output_space(buffer, tail, offset, head)))
3289                         goto fail;
3290         } while (local_cmpxchg(&buffer->head, offset, head) != offset);
3291
3292         if (head - local_read(&buffer->wakeup) > buffer->watermark)
3293                 local_add(buffer->watermark, &buffer->wakeup);
3294
3295         handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
3296         handle->page &= buffer->nr_pages - 1;
3297         handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
3298         handle->addr = buffer->data_pages[handle->page];
3299         handle->addr += handle->size;
3300         handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
3301
3302         if (have_lost) {
3303                 lost_event.header.type = PERF_RECORD_LOST;
3304                 lost_event.header.misc = 0;
3305                 lost_event.header.size = sizeof(lost_event);
3306                 lost_event.id          = event->id;
3307                 lost_event.lost        = local_xchg(&buffer->lost, 0);
3308
3309                 perf_output_put(handle, lost_event);
3310         }
3311
3312         return 0;
3313
3314 fail:
3315         local_inc(&buffer->lost);
3316         perf_output_put_handle(handle);
3317 out:
3318         rcu_read_unlock();
3319
3320         return -ENOSPC;
3321 }
3322
3323 void perf_output_end(struct perf_output_handle *handle)
3324 {
3325         struct perf_event *event = handle->event;
3326         struct perf_buffer *buffer = handle->buffer;
3327
3328         int wakeup_events = event->attr.wakeup_events;
3329
3330         if (handle->sample && wakeup_events) {
3331                 int events = local_inc_return(&buffer->events);
3332                 if (events >= wakeup_events) {
3333                         local_sub(wakeup_events, &buffer->events);
3334                         local_inc(&buffer->wakeup);
3335                 }
3336         }
3337
3338         perf_output_put_handle(handle);
3339         rcu_read_unlock();
3340 }
3341
3342 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
3343 {
3344         /*
3345          * only top level events have the pid namespace they were created in
3346          */
3347         if (event->parent)
3348                 event = event->parent;
3349
3350         return task_tgid_nr_ns(p, event->ns);
3351 }
3352
3353 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3354 {
3355         /*
3356          * only top level events have the pid namespace they were created in
3357          */
3358         if (event->parent)
3359                 event = event->parent;
3360
3361         return task_pid_nr_ns(p, event->ns);
3362 }
3363
3364 static void perf_output_read_one(struct perf_output_handle *handle,
3365                                  struct perf_event *event)
3366 {
3367         u64 read_format = event->attr.read_format;
3368         u64 values[4];
3369         int n = 0;
3370
3371         values[n++] = perf_event_count(event);
3372         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3373                 values[n++] = event->total_time_enabled +
3374                         atomic64_read(&event->child_total_time_enabled);
3375         }
3376         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3377                 values[n++] = event->total_time_running +
3378                         atomic64_read(&event->child_total_time_running);
3379         }
3380         if (read_format & PERF_FORMAT_ID)
3381                 values[n++] = primary_event_id(event);
3382
3383         perf_output_copy(handle, values, n * sizeof(u64));
3384 }
3385
3386 /*
3387  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3388  */
3389 static void perf_output_read_group(struct perf_output_handle *handle,
3390                             struct perf_event *event)
3391 {
3392         struct perf_event *leader = event->group_leader, *sub;
3393         u64 read_format = event->attr.read_format;
3394         u64 values[5];
3395         int n = 0;
3396
3397         values[n++] = 1 + leader->nr_siblings;
3398
3399         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3400                 values[n++] = leader->total_time_enabled;
3401
3402         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3403                 values[n++] = leader->total_time_running;
3404
3405         if (leader != event)
3406                 leader->pmu->read(leader);
3407
3408         values[n++] = perf_event_count(leader);
3409         if (read_format & PERF_FORMAT_ID)
3410                 values[n++] = primary_event_id(leader);
3411
3412         perf_output_copy(handle, values, n * sizeof(u64));
3413
3414         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3415                 n = 0;
3416
3417                 if (sub != event)
3418                         sub->pmu->read(sub);
3419
3420                 values[n++] = perf_event_count(sub);
3421                 if (read_format & PERF_FORMAT_ID)
3422                         values[n++] = primary_event_id(sub);
3423
3424                 perf_output_copy(handle, values, n * sizeof(u64));
3425         }
3426 }
3427
3428 static void perf_output_read(struct perf_output_handle *handle,
3429                              struct perf_event *event)
3430 {
3431         if (event->attr.read_format & PERF_FORMAT_GROUP)
3432                 perf_output_read_group(handle, event);
3433         else
3434                 perf_output_read_one(handle, event);
3435 }
3436
3437 void perf_output_sample(struct perf_output_handle *handle,
3438                         struct perf_event_header *header,
3439                         struct perf_sample_data *data,
3440                         struct perf_event *event)
3441 {
3442         u64 sample_type = data->type;
3443
3444         perf_output_put(handle, *header);
3445
3446         if (sample_type & PERF_SAMPLE_IP)
3447                 perf_output_put(handle, data->ip);
3448
3449         if (sample_type & PERF_SAMPLE_TID)
3450                 perf_output_put(handle, data->tid_entry);
3451
3452         if (sample_type & PERF_SAMPLE_TIME)
3453                 perf_output_put(handle, data->time);
3454
3455         if (sample_type & PERF_SAMPLE_ADDR)
3456                 perf_output_put(handle, data->addr);
3457
3458         if (sample_type & PERF_SAMPLE_ID)
3459                 perf_output_put(handle, data->id);
3460
3461         if (sample_type & PERF_SAMPLE_STREAM_ID)
3462                 perf_output_put(handle, data->stream_id);
3463
3464         if (sample_type & PERF_SAMPLE_CPU)
3465                 perf_output_put(handle, data->cpu_entry);
3466
3467         if (sample_type & PERF_SAMPLE_PERIOD)
3468                 perf_output_put(handle, data->period);
3469
3470         if (sample_type & PERF_SAMPLE_READ)
3471                 perf_output_read(handle, event);
3472
3473         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3474                 if (data->callchain) {
3475                         int size = 1;
3476
3477                         if (data->callchain)
3478                                 size += data->callchain->nr;
3479
3480                         size *= sizeof(u64);
3481
3482                         perf_output_copy(handle, data->callchain, size);
3483                 } else {
3484                         u64 nr = 0;
3485                         perf_output_put(handle, nr);
3486                 }
3487         }
3488
3489         if (sample_type & PERF_SAMPLE_RAW) {
3490                 if (data->raw) {
3491                         perf_output_put(handle, data->raw->size);
3492                         perf_output_copy(handle, data->raw->data,
3493                                          data->raw->size);
3494                 } else {
3495                         struct {
3496                                 u32     size;
3497                                 u32     data;
3498                         } raw = {
3499                                 .size = sizeof(u32),
3500                                 .data = 0,
3501                         };
3502                         perf_output_put(handle, raw);
3503                 }
3504         }
3505 }
3506
3507 void perf_prepare_sample(struct perf_event_header *header,
3508                          struct perf_sample_data *data,
3509                          struct perf_event *event,
3510                          struct pt_regs *regs)
3511 {
3512         u64 sample_type = event->attr.sample_type;
3513
3514         data->type = sample_type;
3515
3516         header->type = PERF_RECORD_SAMPLE;
3517         header->size = sizeof(*header);
3518
3519         header->misc = 0;
3520         header->misc |= perf_misc_flags(regs);
3521
3522         if (sample_type & PERF_SAMPLE_IP) {
3523                 data->ip = perf_instruction_pointer(regs);
3524
3525                 header->size += sizeof(data->ip);
3526         }
3527
3528         if (sample_type & PERF_SAMPLE_TID) {
3529                 /* namespace issues */
3530                 data->tid_entry.pid = perf_event_pid(event, current);
3531                 data->tid_entry.tid = perf_event_tid(event, current);
3532
3533                 header->size += sizeof(data->tid_entry);
3534         }
3535
3536         if (sample_type & PERF_SAMPLE_TIME) {
3537                 data->time = perf_clock();
3538
3539                 header->size += sizeof(data->time);
3540         }
3541
3542         if (sample_type & PERF_SAMPLE_ADDR)
3543                 header->size += sizeof(data->addr);
3544
3545         if (sample_type & PERF_SAMPLE_ID) {
3546                 data->id = primary_event_id(event);
3547
3548                 header->size += sizeof(data->id);
3549         }
3550
3551         if (sample_type & PERF_SAMPLE_STREAM_ID) {
3552                 data->stream_id = event->id;
3553
3554                 header->size += sizeof(data->stream_id);
3555         }
3556
3557         if (sample_type & PERF_SAMPLE_CPU) {
3558                 data->cpu_entry.cpu             = raw_smp_processor_id();
3559                 data->cpu_entry.reserved        = 0;
3560
3561                 header->size += sizeof(data->cpu_entry);
3562         }
3563
3564         if (sample_type & PERF_SAMPLE_PERIOD)
3565                 header->size += sizeof(data->period);
3566
3567         if (sample_type & PERF_SAMPLE_READ)
3568                 header->size += perf_event_read_size(event);
3569
3570         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3571                 int size = 1;
3572
3573                 data->callchain = perf_callchain(regs);
3574
3575                 if (data->callchain)
3576                         size += data->callchain->nr;
3577
3578                 header->size += size * sizeof(u64);
3579         }
3580
3581         if (sample_type & PERF_SAMPLE_RAW) {
3582                 int size = sizeof(u32);
3583
3584                 if (data->raw)
3585                         size += data->raw->size;
3586                 else
3587                         size += sizeof(u32);
3588
3589                 WARN_ON_ONCE(size & (sizeof(u64)-1));
3590                 header->size += size;
3591         }
3592 }
3593
3594 static void perf_event_output(struct perf_event *event, int nmi,
3595                                 struct perf_sample_data *data,
3596                                 struct pt_regs *regs)
3597 {
3598         struct perf_output_handle handle;
3599         struct perf_event_header header;
3600
3601         /* protect the callchain buffers */
3602         rcu_read_lock();
3603
3604         perf_prepare_sample(&header, data, event, regs);
3605
3606         if (perf_output_begin(&handle, event, header.size, nmi, 1))
3607                 goto exit;
3608
3609         perf_output_sample(&handle, &header, data, event);
3610
3611         perf_output_end(&handle);
3612
3613 exit:
3614         rcu_read_unlock();
3615 }
3616
3617 /*
3618  * read event_id
3619  */
3620
3621 struct perf_read_event {
3622         struct perf_event_header        header;
3623
3624         u32                             pid;
3625         u32                             tid;
3626 };
3627
3628 static void
3629 perf_event_read_event(struct perf_event *event,
3630                         struct task_struct *task)
3631 {
3632         struct perf_output_handle handle;
3633         struct perf_read_event read_event = {
3634                 .header = {
3635                         .type = PERF_RECORD_READ,
3636                         .misc = 0,
3637                         .size = sizeof(read_event) + perf_event_read_size(event),
3638                 },
3639                 .pid = perf_event_pid(event, task),
3640                 .tid = perf_event_tid(event, task),
3641         };
3642         int ret;
3643
3644         ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3645         if (ret)
3646                 return;
3647
3648         perf_output_put(&handle, read_event);
3649         perf_output_read(&handle, event);
3650
3651         perf_output_end(&handle);
3652 }
3653
3654 /*
3655  * task tracking -- fork/exit
3656  *
3657  * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
3658  */
3659
3660 struct perf_task_event {
3661         struct task_struct              *task;
3662         struct perf_event_context       *task_ctx;
3663
3664         struct {
3665                 struct perf_event_header        header;
3666
3667                 u32                             pid;
3668                 u32                             ppid;
3669                 u32                             tid;
3670                 u32                             ptid;
3671                 u64                             time;
3672         } event_id;
3673 };
3674
3675 static void perf_event_task_output(struct perf_event *event,
3676                                      struct perf_task_event *task_event)
3677 {
3678         struct perf_output_handle handle;
3679         struct task_struct *task = task_event->task;
3680         int size, ret;
3681
3682         size  = task_event->event_id.header.size;
3683         ret = perf_output_begin(&handle, event, size, 0, 0);
3684
3685         if (ret)
3686                 return;
3687
3688         task_event->event_id.pid = perf_event_pid(event, task);
3689         task_event->event_id.ppid = perf_event_pid(event, current);
3690
3691         task_event->event_id.tid = perf_event_tid(event, task);
3692         task_event->event_id.ptid = perf_event_tid(event, current);
3693
3694         perf_output_put(&handle, task_event->event_id);
3695
3696         perf_output_end(&handle);
3697 }
3698
3699 static int perf_event_task_match(struct perf_event *event)
3700 {
3701         if (event->state < PERF_EVENT_STATE_INACTIVE)
3702                 return 0;
3703
3704         if (event->cpu != -1 && event->cpu != smp_processor_id())
3705                 return 0;
3706
3707         if (event->attr.comm || event->attr.mmap ||
3708             event->attr.mmap_data || event->attr.task)
3709                 return 1;
3710
3711         return 0;
3712 }
3713
3714 static void perf_event_task_ctx(struct perf_event_context *ctx,
3715                                   struct perf_task_event *task_event)
3716 {
3717         struct perf_event *event;
3718
3719         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3720                 if (perf_event_task_match(event))
3721                         perf_event_task_output(event, task_event);
3722         }
3723 }
3724
3725 static void perf_event_task_event(struct perf_task_event *task_event)
3726 {
3727         struct perf_cpu_context *cpuctx;
3728         struct perf_event_context *ctx = task_event->task_ctx;
3729
3730         rcu_read_lock();
3731         cpuctx = &get_cpu_var(perf_cpu_context);
3732         perf_event_task_ctx(&cpuctx->ctx, task_event);
3733         if (!ctx)
3734                 ctx = rcu_dereference(current->perf_event_ctxp);
3735         if (ctx)
3736                 perf_event_task_ctx(ctx, task_event);
3737         put_cpu_var(perf_cpu_context);
3738         rcu_read_unlock();
3739 }
3740
3741 static void perf_event_task(struct task_struct *task,
3742                               struct perf_event_context *task_ctx,
3743                               int new)
3744 {
3745         struct perf_task_event task_event;
3746
3747         if (!atomic_read(&nr_comm_events) &&
3748             !atomic_read(&nr_mmap_events) &&
3749             !atomic_read(&nr_task_events))
3750                 return;
3751
3752         task_event = (struct perf_task_event){
3753                 .task     = task,
3754                 .task_ctx = task_ctx,
3755                 .event_id    = {
3756                         .header = {
3757                                 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3758                                 .misc = 0,
3759                                 .size = sizeof(task_event.event_id),
3760                         },
3761                         /* .pid  */
3762                         /* .ppid */
3763                         /* .tid  */
3764                         /* .ptid */
3765                         .time = perf_clock(),
3766                 },
3767         };
3768
3769         perf_event_task_event(&task_event);
3770 }
3771
3772 void perf_event_fork(struct task_struct *task)
3773 {
3774         perf_event_task(task, NULL, 1);
3775 }
3776
3777 /*
3778  * comm tracking
3779  */
3780
3781 struct perf_comm_event {
3782         struct task_struct      *task;
3783         char                    *comm;
3784         int                     comm_size;
3785
3786         struct {
3787                 struct perf_event_header        header;
3788
3789                 u32                             pid;
3790                 u32                             tid;
3791         } event_id;
3792 };
3793
3794 static void perf_event_comm_output(struct perf_event *event,
3795                                      struct perf_comm_event *comm_event)
3796 {
3797         struct perf_output_handle handle;
3798         int size = comm_event->event_id.header.size;
3799         int ret = perf_output_begin(&handle, event, size, 0, 0);
3800
3801         if (ret)
3802                 return;
3803
3804         comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3805         comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3806
3807         perf_output_put(&handle, comm_event->event_id);
3808         perf_output_copy(&handle, comm_event->comm,
3809                                    comm_event->comm_size);
3810         perf_output_end(&handle);
3811 }
3812
3813 static int perf_event_comm_match(struct perf_event *event)
3814 {
3815         if (event->state < PERF_EVENT_STATE_INACTIVE)
3816                 return 0;
3817
3818         if (event->cpu != -1 && event->cpu != smp_processor_id())
3819                 return 0;
3820
3821         if (event->attr.comm)
3822                 return 1;
3823
3824         return 0;
3825 }
3826
3827 static void perf_event_comm_ctx(struct perf_event_context *ctx,
3828                                   struct perf_comm_event *comm_event)
3829 {
3830         struct perf_event *event;
3831
3832         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3833                 if (perf_event_comm_match(event))
3834                         perf_event_comm_output(event, comm_event);
3835         }
3836 }
3837
3838 static void perf_event_comm_event(struct perf_comm_event *comm_event)
3839 {
3840         struct perf_cpu_context *cpuctx;
3841         struct perf_event_context *ctx;
3842         unsigned int size;
3843         char comm[TASK_COMM_LEN];
3844
3845         memset(comm, 0, sizeof(comm));
3846         strlcpy(comm, comm_event->task->comm, sizeof(comm));
3847         size = ALIGN(strlen(comm)+1, sizeof(u64));
3848
3849         comm_event->comm = comm;
3850         comm_event->comm_size = size;
3851
3852         comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3853
3854         rcu_read_lock();
3855         cpuctx = &get_cpu_var(perf_cpu_context);
3856         perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3857         ctx = rcu_dereference(current->perf_event_ctxp);
3858         if (ctx)
3859                 perf_event_comm_ctx(ctx, comm_event);
3860         put_cpu_var(perf_cpu_context);
3861         rcu_read_unlock();
3862 }
3863
3864 void perf_event_comm(struct task_struct *task)
3865 {
3866         struct perf_comm_event comm_event;
3867
3868         if (task->perf_event_ctxp)
3869                 perf_event_enable_on_exec(task);
3870
3871         if (!atomic_read(&nr_comm_events))
3872                 return;
3873
3874         comm_event = (struct perf_comm_event){
3875                 .task   = task,
3876                 /* .comm      */
3877                 /* .comm_size */
3878                 .event_id  = {
3879                         .header = {
3880                                 .type = PERF_RECORD_COMM,
3881                                 .misc = 0,
3882                                 /* .size */
3883                         },
3884                         /* .pid */
3885                         /* .tid */
3886                 },
3887         };
3888
3889         perf_event_comm_event(&comm_event);
3890 }
3891
3892 /*
3893  * mmap tracking
3894  */
3895
3896 struct perf_mmap_event {
3897         struct vm_area_struct   *vma;
3898
3899         const char              *file_name;
3900         int                     file_size;
3901
3902         struct {
3903                 struct perf_event_header        header;
3904
3905                 u32                             pid;
3906                 u32                             tid;
3907                 u64                             start;
3908                 u64                             len;
3909                 u64                             pgoff;
3910         } event_id;
3911 };
3912
3913 static void perf_event_mmap_output(struct perf_event *event,
3914                                      struct perf_mmap_event *mmap_event)
3915 {
3916         struct perf_output_handle handle;
3917         int size = mmap_event->event_id.header.size;
3918         int ret = perf_output_begin(&handle, event, size, 0, 0);
3919
3920         if (ret)
3921                 return;
3922
3923         mmap_event->event_id.pid = perf_event_pid(event, current);
3924         mmap_event->event_id.tid = perf_event_tid(event, current);
3925
3926         perf_output_put(&handle, mmap_event->event_id);
3927         perf_output_copy(&handle, mmap_event->file_name,
3928                                    mmap_event->file_size);
3929         perf_output_end(&handle);
3930 }
3931
3932 static int perf_event_mmap_match(struct perf_event *event,
3933                                    struct perf_mmap_event *mmap_event,
3934                                    int executable)
3935 {
3936         if (event->state < PERF_EVENT_STATE_INACTIVE)
3937                 return 0;
3938
3939         if (event->cpu != -1 && event->cpu != smp_processor_id())
3940                 return 0;
3941
3942         if ((!executable && event->attr.mmap_data) ||
3943             (executable && event->attr.mmap))
3944                 return 1;
3945
3946         return 0;
3947 }
3948
3949 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3950                                   struct perf_mmap_event *mmap_event,
3951                                   int executable)
3952 {
3953         struct perf_event *event;
3954
3955         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3956                 if (perf_event_mmap_match(event, mmap_event, executable))
3957                         perf_event_mmap_output(event, mmap_event);
3958         }
3959 }
3960
3961 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3962 {
3963         struct perf_cpu_context *cpuctx;
3964         struct perf_event_context *ctx;
3965         struct vm_area_struct *vma = mmap_event->vma;
3966         struct file *file = vma->vm_file;
3967         unsigned int size;
3968         char tmp[16];
3969         char *buf = NULL;
3970         const char *name;
3971
3972         memset(tmp, 0, sizeof(tmp));
3973
3974         if (file) {
3975                 /*
3976                  * d_path works from the end of the buffer backwards, so we
3977                  * need to add enough zero bytes after the string to handle
3978                  * the 64bit alignment we do later.
3979                  */
3980                 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3981                 if (!buf) {
3982                         name = strncpy(tmp, "//enomem", sizeof(tmp));
3983                         goto got_name;
3984                 }
3985                 name = d_path(&file->f_path, buf, PATH_MAX);
3986                 if (IS_ERR(name)) {
3987                         name = strncpy(tmp, "//toolong", sizeof(tmp));
3988                         goto got_name;
3989                 }
3990         } else {
3991                 if (arch_vma_name(mmap_event->vma)) {
3992                         name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3993                                        sizeof(tmp));
3994                         goto got_name;
3995                 }
3996
3997                 if (!vma->vm_mm) {
3998                         name = strncpy(tmp, "[vdso]", sizeof(tmp));
3999                         goto got_name;
4000                 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4001                                 vma->vm_end >= vma->vm_mm->brk) {
4002                         name = strncpy(tmp, "[heap]", sizeof(tmp));
4003                         goto got_name;
4004                 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4005                                 vma->vm_end >= vma->vm_mm->start_stack) {
4006                         name = strncpy(tmp, "[stack]", sizeof(tmp));
4007                         goto got_name;
4008                 }
4009
4010                 name = strncpy(tmp, "//anon", sizeof(tmp));
4011                 goto got_name;
4012         }
4013
4014 got_name:
4015         size = ALIGN(strlen(name)+1, sizeof(u64));
4016
4017         mmap_event->file_name = name;
4018         mmap_event->file_size = size;
4019
4020         mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4021
4022         rcu_read_lock();
4023         cpuctx = &get_cpu_var(perf_cpu_context);
4024         perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
4025         ctx = rcu_dereference(current->perf_event_ctxp);
4026         if (ctx)
4027                 perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
4028         put_cpu_var(perf_cpu_context);
4029         rcu_read_unlock();
4030
4031         kfree(buf);
4032 }
4033
4034 void perf_event_mmap(struct vm_area_struct *vma)
4035 {
4036         struct perf_mmap_event mmap_event;
4037
4038         if (!atomic_read(&nr_mmap_events))
4039                 return;
4040
4041         mmap_event = (struct perf_mmap_event){
4042                 .vma    = vma,
4043                 /* .file_name */
4044                 /* .file_size */
4045                 .event_id  = {
4046                         .header = {
4047                                 .type = PERF_RECORD_MMAP,
4048                                 .misc = PERF_RECORD_MISC_USER,
4049                                 /* .size */
4050                         },
4051                         /* .pid */
4052                         /* .tid */
4053                         .start  = vma->vm_start,
4054                         .len    = vma->vm_end - vma->vm_start,
4055                         .pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
4056                 },
4057         };
4058
4059         perf_event_mmap_event(&mmap_event);
4060 }
4061
4062 /*
4063  * IRQ throttle logging
4064  */
4065
4066 static void perf_log_throttle(struct perf_event *event, int enable)
4067 {
4068         struct perf_output_handle handle;
4069         int ret;
4070
4071         struct {
4072                 struct perf_event_header        header;
4073                 u64                             time;
4074                 u64                             id;
4075                 u64                             stream_id;
4076         } throttle_event = {
4077                 .header = {
4078                         .type = PERF_RECORD_THROTTLE,
4079                         .misc = 0,
4080                         .size = sizeof(throttle_event),
4081                 },
4082                 .time           = perf_clock(),
4083                 .id             = primary_event_id(event),
4084                 .stream_id      = event->id,
4085         };
4086
4087         if (enable)
4088                 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4089
4090         ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
4091         if (ret)
4092                 return;
4093
4094         perf_output_put(&handle, throttle_event);
4095         perf_output_end(&handle);
4096 }
4097
4098 /*
4099  * Generic event overflow handling, sampling.
4100  */
4101
4102 static int __perf_event_overflow(struct perf_event *event, int nmi,
4103                                    int throttle, struct perf_sample_data *data,
4104                                    struct pt_regs *regs)
4105 {
4106         int events = atomic_read(&event->event_limit);
4107         struct hw_perf_event *hwc = &event->hw;
4108         int ret = 0;
4109
4110         if (!throttle) {
4111                 hwc->interrupts++;
4112         } else {
4113                 if (hwc->interrupts != MAX_INTERRUPTS) {
4114                         hwc->interrupts++;
4115                         if (HZ * hwc->interrupts >
4116                                         (u64)sysctl_perf_event_sample_rate) {
4117                                 hwc->interrupts = MAX_INTERRUPTS;
4118                                 perf_log_throttle(event, 0);
4119                                 ret = 1;
4120                         }
4121                 } else {
4122                         /*
4123                          * Keep re-disabling events even though on the previous
4124                          * pass we disabled it - just in case we raced with a
4125                          * sched-in and the event got enabled again:
4126                          */
4127                         ret = 1;
4128                 }
4129         }
4130
4131         if (event->attr.freq) {
4132                 u64 now = perf_clock();
4133                 s64 delta = now - hwc->freq_time_stamp;
4134
4135                 hwc->freq_time_stamp = now;
4136
4137                 if (delta > 0 && delta < 2*TICK_NSEC)
4138                         perf_adjust_period(event, delta, hwc->last_period);
4139         }
4140
4141         /*
4142          * XXX event_limit might not quite work as expected on inherited
4143          * events
4144          */
4145
4146         event->pending_kill = POLL_IN;
4147         if (events && atomic_dec_and_test(&event->event_limit)) {
4148                 ret = 1;
4149                 event->pending_kill = POLL_HUP;
4150                 if (nmi) {
4151                         event->pending_disable = 1;
4152                         perf_pending_queue(&event->pending,
4153                                            perf_pending_event);
4154                 } else
4155                         perf_event_disable(event);
4156         }
4157
4158         if (event->overflow_handler)
4159                 event->overflow_handler(event, nmi, data, regs);
4160         else
4161                 perf_event_output(event, nmi, data, regs);
4162
4163         return ret;
4164 }
4165
4166 int perf_event_overflow(struct perf_event *event, int nmi,
4167                           struct perf_sample_data *data,
4168                           struct pt_regs *regs)
4169 {
4170         return __perf_event_overflow(event, nmi, 1, data, regs);
4171 }
4172
4173 /*
4174  * Generic software event infrastructure
4175  */
4176
4177 /*
4178  * We directly increment event->count and keep a second value in
4179  * event->hw.period_left to count intervals. This period event
4180  * is kept in the range [-sample_period, 0] so that we can use the
4181  * sign as trigger.
4182  */
4183
4184 static u64 perf_swevent_set_period(struct perf_event *event)
4185 {
4186         struct hw_perf_event *hwc = &event->hw;
4187         u64 period = hwc->last_period;
4188         u64 nr, offset;
4189         s64 old, val;
4190
4191         hwc->last_period = hwc->sample_period;
4192
4193 again:
4194         old = val = local64_read(&hwc->period_left);
4195         if (val < 0)
4196                 return 0;
4197
4198         nr = div64_u64(period + val, period);
4199         offset = nr * period;
4200         val -= offset;
4201         if (local64_cmpxchg(&hwc->period_left, old, val) != old)
4202                 goto again;
4203
4204         return nr;
4205 }
4206
4207 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4208                                     int nmi, struct perf_sample_data *data,
4209                                     struct pt_regs *regs)
4210 {
4211         struct hw_perf_event *hwc = &event->hw;
4212         int throttle = 0;
4213
4214         data->period = event->hw.last_period;
4215         if (!overflow)
4216                 overflow = perf_swevent_set_period(event);
4217
4218         if (hwc->interrupts == MAX_INTERRUPTS)
4219                 return;
4220
4221         for (; overflow; overflow--) {
4222                 if (__perf_event_overflow(event, nmi, throttle,
4223                                             data, regs)) {
4224                         /*
4225                          * We inhibit the overflow from happening when
4226                          * hwc->interrupts == MAX_INTERRUPTS.
4227                          */
4228                         break;
4229                 }
4230                 throttle = 1;
4231         }
4232 }
4233
4234 static void perf_swevent_event(struct perf_event *event, u64 nr,
4235                                int nmi, struct perf_sample_data *data,
4236                                struct pt_regs *regs)
4237 {
4238         struct hw_perf_event *hwc = &event->hw;
4239
4240         local64_add(nr, &event->count);
4241
4242         if (!regs)
4243                 return;
4244
4245         if (!hwc->sample_period)
4246                 return;
4247
4248         if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4249                 return perf_swevent_overflow(event, 1, nmi, data, regs);
4250
4251         if (local64_add_negative(nr, &hwc->period_left))
4252                 return;
4253
4254         perf_swevent_overflow(event, 0, nmi, data, regs);
4255 }
4256
4257 static int perf_exclude_event(struct perf_event *event,
4258                               struct pt_regs *regs)
4259 {
4260         if (event->hw.state & PERF_HES_STOPPED)
4261                 return 0;
4262
4263         if (regs) {
4264                 if (event->attr.exclude_user && user_mode(regs))
4265                         return 1;
4266
4267                 if (event->attr.exclude_kernel && !user_mode(regs))
4268                         return 1;
4269         }
4270
4271         return 0;
4272 }
4273
4274 static int perf_swevent_match(struct perf_event *event,
4275                                 enum perf_type_id type,
4276                                 u32 event_id,
4277                                 struct perf_sample_data *data,
4278                                 struct pt_regs *regs)
4279 {
4280         if (event->attr.type != type)
4281                 return 0;
4282
4283         if (event->attr.config != event_id)
4284                 return 0;
4285
4286         if (perf_exclude_event(event, regs))
4287                 return 0;
4288
4289         return 1;
4290 }
4291
4292 static inline u64 swevent_hash(u64 type, u32 event_id)
4293 {
4294         u64 val = event_id | (type << 32);
4295
4296         return hash_64(val, SWEVENT_HLIST_BITS);
4297 }
4298
4299 static inline struct hlist_head *
4300 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4301 {
4302         u64 hash = swevent_hash(type, event_id);
4303
4304         return &hlist->heads[hash];
4305 }
4306
4307 /* For the read side: events when they trigger */
4308 static inline struct hlist_head *
4309 find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
4310 {
4311         struct swevent_hlist *hlist;
4312
4313         hlist = rcu_dereference(ctx->swevent_hlist);
4314         if (!hlist)
4315                 return NULL;
4316
4317         return __find_swevent_head(hlist, type, event_id);
4318 }
4319
4320 /* For the event head insertion and removal in the hlist */
4321 static inline struct hlist_head *
4322 find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
4323 {
4324         struct swevent_hlist *hlist;
4325         u32 event_id = event->attr.config;
4326         u64 type = event->attr.type;
4327
4328         /*
4329          * Event scheduling is always serialized against hlist allocation
4330          * and release. Which makes the protected version suitable here.
4331          * The context lock guarantees that.
4332          */
4333         hlist = rcu_dereference_protected(ctx->swevent_hlist,
4334                                           lockdep_is_held(&event->ctx->lock));
4335         if (!hlist)
4336                 return NULL;
4337
4338         return __find_swevent_head(hlist, type, event_id);
4339 }
4340
4341 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4342                                     u64 nr, int nmi,
4343                                     struct perf_sample_data *data,
4344                                     struct pt_regs *regs)
4345 {
4346         struct perf_cpu_context *cpuctx;
4347         struct perf_event *event;
4348         struct hlist_node *node;
4349         struct hlist_head *head;
4350
4351         cpuctx = &__get_cpu_var(perf_cpu_context);
4352
4353         rcu_read_lock();
4354
4355         head = find_swevent_head_rcu(cpuctx, type, event_id);
4356
4357         if (!head)
4358                 goto end;
4359
4360         hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4361                 if (perf_swevent_match(event, type, event_id, data, regs))
4362                         perf_swevent_event(event, nr, nmi, data, regs);
4363         }
4364 end:
4365         rcu_read_unlock();
4366 }
4367
4368 int perf_swevent_get_recursion_context(void)
4369 {
4370         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4371
4372         return get_recursion_context(cpuctx->recursion);
4373 }
4374 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4375
4376 void inline perf_swevent_put_recursion_context(int rctx)
4377 {
4378         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4379
4380         put_recursion_context(cpuctx->recursion, rctx);
4381 }
4382
4383 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4384                             struct pt_regs *regs, u64 addr)
4385 {
4386         struct perf_sample_data data;
4387         int rctx;
4388
4389         preempt_disable_notrace();
4390         rctx = perf_swevent_get_recursion_context();
4391         if (rctx < 0)
4392                 return;
4393
4394         perf_sample_data_init(&data, addr);
4395
4396         do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4397
4398         perf_swevent_put_recursion_context(rctx);
4399         preempt_enable_notrace();
4400 }
4401
4402 static void perf_swevent_read(struct perf_event *event)
4403 {
4404 }
4405
4406 static int perf_swevent_add(struct perf_event *event, int flags)
4407 {
4408         struct hw_perf_event *hwc = &event->hw;
4409         struct perf_cpu_context *cpuctx;
4410         struct hlist_head *head;
4411
4412         cpuctx = &__get_cpu_var(perf_cpu_context);
4413
4414         if (hwc->sample_period) {
4415                 hwc->last_period = hwc->sample_period;
4416                 perf_swevent_set_period(event);
4417         }
4418
4419         hwc->state = !(flags & PERF_EF_START);
4420
4421         head = find_swevent_head(cpuctx, event);
4422         if (WARN_ON_ONCE(!head))
4423                 return -EINVAL;
4424
4425         hlist_add_head_rcu(&event->hlist_entry, head);
4426
4427         return 0;
4428 }
4429
4430 static void perf_swevent_del(struct perf_event *event, int flags)
4431 {
4432         hlist_del_rcu(&event->hlist_entry);
4433 }
4434
4435 static void perf_swevent_start(struct perf_event *event, int flags)
4436 {
4437         event->hw.state = 0;
4438 }
4439
4440 static void perf_swevent_stop(struct perf_event *event, int flags)
4441 {
4442         event->hw.state = PERF_HES_STOPPED;
4443 }
4444
4445 /* Deref the hlist from the update side */
4446 static inline struct swevent_hlist *
4447 swevent_hlist_deref(struct perf_cpu_context *cpuctx)
4448 {
4449         return rcu_dereference_protected(cpuctx->swevent_hlist,
4450                                          lockdep_is_held(&cpuctx->hlist_mutex));
4451 }
4452
4453 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4454 {
4455         struct swevent_hlist *hlist;
4456
4457         hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
4458         kfree(hlist);
4459 }
4460
4461 static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
4462 {
4463         struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
4464
4465         if (!hlist)
4466                 return;
4467
4468         rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
4469         call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4470 }
4471
4472 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4473 {
4474         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4475
4476         mutex_lock(&cpuctx->hlist_mutex);
4477
4478         if (!--cpuctx->hlist_refcount)
4479                 swevent_hlist_release(cpuctx);
4480
4481         mutex_unlock(&cpuctx->hlist_mutex);
4482 }
4483
4484 static void swevent_hlist_put(struct perf_event *event)
4485 {
4486         int cpu;
4487
4488         if (event->cpu != -1) {
4489                 swevent_hlist_put_cpu(event, event->cpu);
4490                 return;
4491         }
4492
4493         for_each_possible_cpu(cpu)
4494                 swevent_hlist_put_cpu(event, cpu);
4495 }
4496
4497 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4498 {
4499         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4500         int err = 0;
4501
4502         mutex_lock(&cpuctx->hlist_mutex);
4503
4504         if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
4505                 struct swevent_hlist *hlist;
4506
4507                 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4508                 if (!hlist) {
4509                         err = -ENOMEM;
4510                         goto exit;
4511                 }
4512                 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
4513         }
4514         cpuctx->hlist_refcount++;
4515 exit:
4516         mutex_unlock(&cpuctx->hlist_mutex);
4517
4518         return err;
4519 }
4520
4521 static int swevent_hlist_get(struct perf_event *event)
4522 {
4523         int err;
4524         int cpu, failed_cpu;
4525
4526         if (event->cpu != -1)
4527                 return swevent_hlist_get_cpu(event, event->cpu);
4528
4529         get_online_cpus();
4530         for_each_possible_cpu(cpu) {
4531                 err = swevent_hlist_get_cpu(event, cpu);
4532                 if (err) {
4533                         failed_cpu = cpu;
4534                         goto fail;
4535                 }
4536         }
4537         put_online_cpus();
4538
4539         return 0;
4540 fail:
4541         for_each_possible_cpu(cpu) {
4542                 if (cpu == failed_cpu)
4543                         break;
4544                 swevent_hlist_put_cpu(event, cpu);
4545         }
4546
4547         put_online_cpus();
4548         return err;
4549 }
4550
4551 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4552
4553 static void sw_perf_event_destroy(struct perf_event *event)
4554 {
4555         u64 event_id = event->attr.config;
4556
4557         WARN_ON(event->parent);
4558
4559         atomic_dec(&perf_swevent_enabled[event_id]);
4560         swevent_hlist_put(event);
4561 }
4562
4563 static int perf_swevent_init(struct perf_event *event)
4564 {
4565         int event_id = event->attr.config;
4566
4567         if (event->attr.type != PERF_TYPE_SOFTWARE)
4568                 return -ENOENT;
4569
4570         switch (event_id) {
4571         case PERF_COUNT_SW_CPU_CLOCK:
4572         case PERF_COUNT_SW_TASK_CLOCK:
4573                 return -ENOENT;
4574
4575         default:
4576                 break;
4577         }
4578
4579         if (event_id > PERF_COUNT_SW_MAX)
4580                 return -ENOENT;
4581
4582         if (!event->parent) {
4583                 int err;
4584
4585                 err = swevent_hlist_get(event);
4586                 if (err)
4587                         return err;
4588
4589                 atomic_inc(&perf_swevent_enabled[event_id]);
4590                 event->destroy = sw_perf_event_destroy;
4591         }
4592
4593         return 0;
4594 }
4595
4596 static struct pmu perf_swevent = {
4597         .event_init     = perf_swevent_init,
4598         .add            = perf_swevent_add,
4599         .del            = perf_swevent_del,
4600         .start          = perf_swevent_start,
4601         .stop           = perf_swevent_stop,
4602         .read           = perf_swevent_read,
4603 };
4604
4605 #ifdef CONFIG_EVENT_TRACING
4606
4607 static int perf_tp_filter_match(struct perf_event *event,
4608                                 struct perf_sample_data *data)
4609 {
4610         void *record = data->raw->data;
4611
4612         if (likely(!event->filter) || filter_match_preds(event->filter, record))
4613                 return 1;
4614         return 0;
4615 }
4616
4617 static int perf_tp_event_match(struct perf_event *event,
4618                                 struct perf_sample_data *data,
4619                                 struct pt_regs *regs)
4620 {
4621         /*
4622          * All tracepoints are from kernel-space.
4623          */
4624         if (event->attr.exclude_kernel)
4625                 return 0;
4626
4627         if (!perf_tp_filter_match(event, data))
4628                 return 0;
4629
4630         return 1;
4631 }
4632
4633 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
4634                    struct pt_regs *regs, struct hlist_head *head, int rctx)
4635 {
4636         struct perf_sample_data data;
4637         struct perf_event *event;
4638         struct hlist_node *node;
4639
4640         struct perf_raw_record raw = {
4641                 .size = entry_size,
4642                 .data = record,
4643         };
4644
4645         perf_sample_data_init(&data, addr);
4646         data.raw = &raw;
4647
4648         hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4649                 if (perf_tp_event_match(event, &data, regs))
4650                         perf_swevent_event(event, count, 1, &data, regs);
4651         }
4652
4653         perf_swevent_put_recursion_context(rctx);
4654 }
4655 EXPORT_SYMBOL_GPL(perf_tp_event);
4656
4657 static void tp_perf_event_destroy(struct perf_event *event)
4658 {
4659         perf_trace_destroy(event);
4660 }
4661
4662 static int perf_tp_event_init(struct perf_event *event)
4663 {
4664         int err;
4665
4666         if (event->attr.type != PERF_TYPE_TRACEPOINT)
4667                 return -ENOENT;
4668
4669         /*
4670          * Raw tracepoint data is a severe data leak, only allow root to
4671          * have these.
4672          */
4673         if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4674                         perf_paranoid_tracepoint_raw() &&
4675                         !capable(CAP_SYS_ADMIN))
4676                 return -EPERM;
4677
4678         err = perf_trace_init(event);
4679         if (err)
4680                 return err;
4681
4682         event->destroy = tp_perf_event_destroy;
4683
4684         return 0;
4685 }
4686
4687 static struct pmu perf_tracepoint = {
4688         .event_init     = perf_tp_event_init,
4689         .add            = perf_trace_add,
4690         .del            = perf_trace_del,
4691         .start          = perf_swevent_start,
4692         .stop           = perf_swevent_stop,
4693         .read           = perf_swevent_read,
4694 };
4695
4696 static inline void perf_tp_register(void)
4697 {
4698         perf_pmu_register(&perf_tracepoint);
4699 }
4700
4701 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4702 {
4703         char *filter_str;
4704         int ret;
4705
4706         if (event->attr.type != PERF_TYPE_TRACEPOINT)
4707                 return -EINVAL;
4708
4709         filter_str = strndup_user(arg, PAGE_SIZE);
4710         if (IS_ERR(filter_str))
4711                 return PTR_ERR(filter_str);
4712
4713         ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4714
4715         kfree(filter_str);
4716         return ret;
4717 }
4718
4719 static void perf_event_free_filter(struct perf_event *event)
4720 {
4721         ftrace_profile_free_filter(event);
4722 }
4723
4724 #else
4725
4726 static inline void perf_tp_register(void)
4727 {
4728 }
4729
4730 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4731 {
4732         return -ENOENT;
4733 }
4734
4735 static void perf_event_free_filter(struct perf_event *event)
4736 {
4737 }
4738
4739 #endif /* CONFIG_EVENT_TRACING */
4740
4741 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4742 void perf_bp_event(struct perf_event *bp, void *data)
4743 {
4744         struct perf_sample_data sample;
4745         struct pt_regs *regs = data;
4746
4747         perf_sample_data_init(&sample, bp->attr.bp_addr);
4748
4749         if (!bp->hw.state && !perf_exclude_event(bp, regs))
4750                 perf_swevent_event(bp, 1, 1, &sample, regs);
4751 }
4752 #endif
4753
4754 /*
4755  * hrtimer based swevent callback
4756  */
4757
4758 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4759 {
4760         enum hrtimer_restart ret = HRTIMER_RESTART;
4761         struct perf_sample_data data;
4762         struct pt_regs *regs;
4763         struct perf_event *event;
4764         u64 period;
4765
4766         event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4767         event->pmu->read(event);
4768
4769         perf_sample_data_init(&data, 0);
4770         data.period = event->hw.last_period;
4771         regs = get_irq_regs();
4772
4773         if (regs && !perf_exclude_event(event, regs)) {
4774                 if (!(event->attr.exclude_idle && current->pid == 0))
4775                         if (perf_event_overflow(event, 0, &data, regs))
4776                                 ret = HRTIMER_NORESTART;
4777         }
4778
4779         period = max_t(u64, 10000, event->hw.sample_period);
4780         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4781
4782         return ret;
4783 }
4784
4785 static void perf_swevent_start_hrtimer(struct perf_event *event)
4786 {
4787         struct hw_perf_event *hwc = &event->hw;
4788
4789         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4790         hwc->hrtimer.function = perf_swevent_hrtimer;
4791         if (hwc->sample_period) {
4792                 s64 period = local64_read(&hwc->period_left);
4793
4794                 if (period) {
4795                         if (period < 0)
4796                                 period = 10000;
4797
4798                         local64_set(&hwc->period_left, 0);
4799                 } else {
4800                         period = max_t(u64, 10000, hwc->sample_period);
4801                 }
4802                 __hrtimer_start_range_ns(&hwc->hrtimer,
4803                                 ns_to_ktime(period), 0,
4804                                 HRTIMER_MODE_REL, 0);
4805         }
4806 }
4807
4808 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4809 {
4810         struct hw_perf_event *hwc = &event->hw;
4811
4812         if (hwc->sample_period) {
4813                 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4814                 local64_set(&hwc->period_left, ktime_to_ns(remaining));
4815
4816                 hrtimer_cancel(&hwc->hrtimer);
4817         }
4818 }
4819
4820 /*
4821  * Software event: cpu wall time clock
4822  */
4823
4824 static void cpu_clock_event_update(struct perf_event *event)
4825 {
4826         s64 prev;
4827         u64 now;
4828
4829         now = local_clock();
4830         prev = local64_xchg(&event->hw.prev_count, now);
4831         local64_add(now - prev, &event->count);
4832 }
4833
4834 static void cpu_clock_event_start(struct perf_event *event, int flags)
4835 {
4836         local64_set(&event->hw.prev_count, local_clock());
4837         perf_swevent_start_hrtimer(event);
4838 }
4839
4840 static void cpu_clock_event_stop(struct perf_event *event, int flags)
4841 {
4842         perf_swevent_cancel_hrtimer(event);
4843         cpu_clock_event_update(event);
4844 }
4845
4846 static int cpu_clock_event_add(struct perf_event *event, int flags)
4847 {
4848         if (flags & PERF_EF_START)
4849                 cpu_clock_event_start(event, flags);
4850
4851         return 0;
4852 }
4853
4854 static void cpu_clock_event_del(struct perf_event *event, int flags)
4855 {
4856         cpu_clock_event_stop(event, flags);
4857 }
4858
4859 static void cpu_clock_event_read(struct perf_event *event)
4860 {
4861         cpu_clock_event_update(event);
4862 }
4863
4864 static int cpu_clock_event_init(struct perf_event *event)
4865 {
4866         if (event->attr.type != PERF_TYPE_SOFTWARE)
4867                 return -ENOENT;
4868
4869         if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
4870                 return -ENOENT;
4871
4872         return 0;
4873 }
4874
4875 static struct pmu perf_cpu_clock = {
4876         .event_init     = cpu_clock_event_init,
4877         .add            = cpu_clock_event_add,
4878         .del            = cpu_clock_event_del,
4879         .start          = cpu_clock_event_start,
4880         .stop           = cpu_clock_event_stop,
4881         .read           = cpu_clock_event_read,
4882 };
4883
4884 /*
4885  * Software event: task time clock
4886  */
4887
4888 static void task_clock_event_update(struct perf_event *event, u64 now)
4889 {
4890         u64 prev;
4891         s64 delta;
4892
4893         prev = local64_xchg(&event->hw.prev_count, now);
4894         delta = now - prev;
4895         local64_add(delta, &event->count);
4896 }
4897
4898 static void task_clock_event_start(struct perf_event *event, int flags)
4899 {
4900         local64_set(&event->hw.prev_count, event->ctx->time);
4901         perf_swevent_start_hrtimer(event);
4902 }
4903
4904 static void task_clock_event_stop(struct perf_event *event, int flags)
4905 {
4906         perf_swevent_cancel_hrtimer(event);
4907         task_clock_event_update(event, event->ctx->time);
4908 }
4909
4910 static int task_clock_event_add(struct perf_event *event, int flags)
4911 {
4912         if (flags & PERF_EF_START)
4913                 task_clock_event_start(event, flags);
4914
4915         return 0;
4916 }
4917
4918 static void task_clock_event_del(struct perf_event *event, int flags)
4919 {
4920         task_clock_event_stop(event, PERF_EF_UPDATE);
4921 }
4922
4923 static void task_clock_event_read(struct perf_event *event)
4924 {
4925         u64 time;
4926
4927         if (!in_nmi()) {
4928                 update_context_time(event->ctx);
4929                 time = event->ctx->time;
4930         } else {
4931                 u64 now = perf_clock();
4932                 u64 delta = now - event->ctx->timestamp;
4933                 time = event->ctx->time + delta;
4934         }
4935
4936         task_clock_event_update(event, time);
4937 }
4938
4939 static int task_clock_event_init(struct perf_event *event)
4940 {
4941         if (event->attr.type != PERF_TYPE_SOFTWARE)
4942                 return -ENOENT;
4943
4944         if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
4945                 return -ENOENT;
4946
4947         return 0;
4948 }
4949
4950 static struct pmu perf_task_clock = {
4951         .event_init     = task_clock_event_init,
4952         .add            = task_clock_event_add,
4953         .del            = task_clock_event_del,
4954         .start          = task_clock_event_start,
4955         .stop           = task_clock_event_stop,
4956         .read           = task_clock_event_read,
4957 };
4958
4959 static LIST_HEAD(pmus);
4960 static DEFINE_MUTEX(pmus_lock);
4961 static struct srcu_struct pmus_srcu;
4962
4963 static void perf_pmu_nop_void(struct pmu *pmu)
4964 {
4965 }
4966
4967 static int perf_pmu_nop_int(struct pmu *pmu)
4968 {
4969         return 0;
4970 }
4971
4972 static void perf_pmu_start_txn(struct pmu *pmu)
4973 {
4974         perf_pmu_disable(pmu);
4975 }
4976
4977 static int perf_pmu_commit_txn(struct pmu *pmu)
4978 {
4979         perf_pmu_enable(pmu);
4980         return 0;
4981 }
4982
4983 static void perf_pmu_cancel_txn(struct pmu *pmu)
4984 {
4985         perf_pmu_enable(pmu);
4986 }
4987
4988 int perf_pmu_register(struct pmu *pmu)
4989 {
4990         int ret;
4991
4992         mutex_lock(&pmus_lock);
4993         ret = -ENOMEM;
4994         pmu->pmu_disable_count = alloc_percpu(int);
4995         if (!pmu->pmu_disable_count)
4996                 goto unlock;
4997
4998         if (!pmu->start_txn) {
4999                 if (pmu->pmu_enable) {
5000                         /*
5001                          * If we have pmu_enable/pmu_disable calls, install
5002                          * transaction stubs that use that to try and batch
5003                          * hardware accesses.
5004                          */
5005                         pmu->start_txn  = perf_pmu_start_txn;
5006                         pmu->commit_txn = perf_pmu_commit_txn;
5007                         pmu->cancel_txn = perf_pmu_cancel_txn;
5008                 } else {
5009                         pmu->start_txn  = perf_pmu_nop_void;
5010                         pmu->commit_txn = perf_pmu_nop_int;
5011                         pmu->cancel_txn = perf_pmu_nop_void;
5012                 }
5013         }
5014
5015         if (!pmu->pmu_enable) {
5016                 pmu->pmu_enable  = perf_pmu_nop_void;
5017                 pmu->pmu_disable = perf_pmu_nop_void;
5018         }
5019
5020         list_add_rcu(&pmu->entry, &pmus);
5021         ret = 0;
5022 unlock:
5023         mutex_unlock(&pmus_lock);
5024
5025         return ret;
5026 }
5027
5028 void perf_pmu_unregister(struct pmu *pmu)
5029 {
5030         mutex_lock(&pmus_lock);
5031         list_del_rcu(&pmu->entry);
5032         mutex_unlock(&pmus_lock);
5033
5034         synchronize_srcu(&pmus_srcu);
5035
5036         free_percpu(pmu->pmu_disable_count);
5037 }
5038
5039 struct pmu *perf_init_event(struct perf_event *event)
5040 {
5041         struct pmu *pmu = NULL;
5042         int idx;
5043
5044         idx = srcu_read_lock(&pmus_srcu);
5045         list_for_each_entry_rcu(pmu, &pmus, entry) {
5046                 int ret = pmu->event_init(event);
5047                 if (!ret)
5048                         break;
5049                 if (ret != -ENOENT) {
5050                         pmu = ERR_PTR(ret);
5051                         break;
5052                 }
5053         }
5054         srcu_read_unlock(&pmus_srcu, idx);
5055
5056         return pmu;
5057 }
5058
5059 /*
5060  * Allocate and initialize a event structure
5061  */
5062 static struct perf_event *
5063 perf_event_alloc(struct perf_event_attr *attr,
5064                    int cpu,
5065                    struct perf_event_context *ctx,
5066                    struct perf_event *group_leader,
5067                    struct perf_event *parent_event,
5068                    perf_overflow_handler_t overflow_handler,
5069                    gfp_t gfpflags)
5070 {
5071         struct pmu *pmu;
5072         struct perf_event *event;
5073         struct hw_perf_event *hwc;
5074         long err;
5075
5076         event = kzalloc(sizeof(*event), gfpflags);
5077         if (!event)
5078                 return ERR_PTR(-ENOMEM);
5079
5080         /*
5081          * Single events are their own group leaders, with an
5082          * empty sibling list:
5083          */
5084         if (!group_leader)
5085                 group_leader = event;
5086
5087         mutex_init(&event->child_mutex);
5088         INIT_LIST_HEAD(&event->child_list);
5089
5090         INIT_LIST_HEAD(&event->group_entry);
5091         INIT_LIST_HEAD(&event->event_entry);
5092         INIT_LIST_HEAD(&event->sibling_list);
5093         init_waitqueue_head(&event->waitq);
5094
5095         mutex_init(&event->mmap_mutex);
5096
5097         event->cpu              = cpu;
5098         event->attr             = *attr;
5099         event->group_leader     = group_leader;
5100         event->pmu              = NULL;
5101         event->ctx              = ctx;
5102         event->oncpu            = -1;
5103
5104         event->parent           = parent_event;
5105
5106         event->ns               = get_pid_ns(current->nsproxy->pid_ns);
5107         event->id               = atomic64_inc_return(&perf_event_id);
5108
5109         event->state            = PERF_EVENT_STATE_INACTIVE;
5110
5111         if (!overflow_handler && parent_event)
5112                 overflow_handler = parent_event->overflow_handler;
5113         
5114         event->overflow_handler = overflow_handler;
5115
5116         if (attr->disabled)
5117                 event->state = PERF_EVENT_STATE_OFF;
5118
5119         pmu = NULL;
5120
5121         hwc = &event->hw;
5122         hwc->sample_period = attr->sample_period;
5123         if (attr->freq && attr->sample_freq)
5124                 hwc->sample_period = 1;
5125         hwc->last_period = hwc->sample_period;
5126
5127         local64_set(&hwc->period_left, hwc->sample_period);
5128
5129         /*
5130          * we currently do not support PERF_FORMAT_GROUP on inherited events
5131          */
5132         if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5133                 goto done;
5134
5135         pmu = perf_init_event(event);
5136
5137 done:
5138         err = 0;
5139         if (!pmu)
5140                 err = -EINVAL;
5141         else if (IS_ERR(pmu))
5142                 err = PTR_ERR(pmu);
5143
5144         if (err) {
5145                 if (event->ns)
5146                         put_pid_ns(event->ns);
5147                 kfree(event);
5148                 return ERR_PTR(err);
5149         }
5150
5151         event->pmu = pmu;
5152
5153         if (!event->parent) {
5154                 atomic_inc(&nr_events);
5155                 if (event->attr.mmap || event->attr.mmap_data)
5156                         atomic_inc(&nr_mmap_events);
5157                 if (event->attr.comm)
5158                         atomic_inc(&nr_comm_events);
5159                 if (event->attr.task)
5160                         atomic_inc(&nr_task_events);
5161                 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5162                         err = get_callchain_buffers();
5163                         if (err) {
5164                                 free_event(event);
5165                                 return ERR_PTR(err);
5166                         }
5167                 }
5168         }
5169
5170         return event;
5171 }
5172
5173 static int perf_copy_attr(struct perf_event_attr __user *uattr,
5174                           struct perf_event_attr *attr)
5175 {
5176         u32 size;
5177         int ret;
5178
5179         if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5180                 return -EFAULT;
5181
5182         /*
5183          * zero the full structure, so that a short copy will be nice.
5184          */
5185         memset(attr, 0, sizeof(*attr));
5186
5187         ret = get_user(size, &uattr->size);
5188         if (ret)
5189                 return ret;
5190
5191         if (size > PAGE_SIZE)   /* silly large */
5192                 goto err_size;
5193
5194         if (!size)              /* abi compat */
5195                 size = PERF_ATTR_SIZE_VER0;
5196
5197         if (size < PERF_ATTR_SIZE_VER0)
5198                 goto err_size;
5199
5200         /*
5201          * If we're handed a bigger struct than we know of,
5202          * ensure all the unknown bits are 0 - i.e. new
5203          * user-space does not rely on any kernel feature
5204          * extensions we dont know about yet.
5205          */
5206         if (size > sizeof(*attr)) {
5207                 unsigned char __user *addr;
5208                 unsigned char __user *end;
5209                 unsigned char val;
5210
5211                 addr = (void __user *)uattr + sizeof(*attr);
5212                 end  = (void __user *)uattr + size;
5213
5214                 for (; addr < end; addr++) {
5215                         ret = get_user(val, addr);
5216                         if (ret)
5217                                 return ret;
5218                         if (val)
5219                                 goto err_size;
5220                 }
5221                 size = sizeof(*attr);
5222         }
5223
5224         ret = copy_from_user(attr, uattr, size);
5225         if (ret)
5226                 return -EFAULT;
5227
5228         /*
5229          * If the type exists, the corresponding creation will verify
5230          * the attr->config.
5231          */
5232         if (attr->type >= PERF_TYPE_MAX)
5233                 return -EINVAL;
5234
5235         if (attr->__reserved_1)
5236                 return -EINVAL;
5237
5238         if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5239                 return -EINVAL;
5240
5241         if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5242                 return -EINVAL;
5243
5244 out:
5245         return ret;
5246
5247 err_size:
5248         put_user(sizeof(*attr), &uattr->size);
5249         ret = -E2BIG;
5250         goto out;
5251 }
5252
5253 static int
5254 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
5255 {
5256         struct perf_buffer *buffer = NULL, *old_buffer = NULL;
5257         int ret = -EINVAL;
5258
5259         if (!output_event)
5260                 goto set;
5261
5262         /* don't allow circular references */
5263         if (event == output_event)
5264                 goto out;
5265
5266         /*
5267          * Don't allow cross-cpu buffers
5268          */
5269         if (output_event->cpu != event->cpu)
5270                 goto out;
5271
5272         /*
5273          * If its not a per-cpu buffer, it must be the same task.
5274          */
5275         if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5276                 goto out;
5277
5278 set:
5279         mutex_lock(&event->mmap_mutex);
5280         /* Can't redirect output if we've got an active mmap() */
5281         if (atomic_read(&event->mmap_count))
5282                 goto unlock;
5283
5284         if (output_event) {
5285                 /* get the buffer we want to redirect to */
5286                 buffer = perf_buffer_get(output_event);
5287                 if (!buffer)
5288                         goto unlock;
5289         }
5290
5291         old_buffer = event->buffer;
5292         rcu_assign_pointer(event->buffer, buffer);
5293         ret = 0;
5294 unlock:
5295         mutex_unlock(&event->mmap_mutex);
5296
5297         if (old_buffer)
5298                 perf_buffer_put(old_buffer);
5299 out:
5300         return ret;
5301 }
5302
5303 /**
5304  * sys_perf_event_open - open a performance event, associate it to a task/cpu
5305  *
5306  * @attr_uptr:  event_id type attributes for monitoring/sampling
5307  * @pid:                target pid
5308  * @cpu:                target cpu
5309  * @group_fd:           group leader event fd
5310  */
5311 SYSCALL_DEFINE5(perf_event_open,
5312                 struct perf_event_attr __user *, attr_uptr,
5313                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5314 {
5315         struct perf_event *event, *group_leader = NULL, *output_event = NULL;
5316         struct perf_event_attr attr;
5317         struct perf_event_context *ctx;
5318         struct file *event_file = NULL;
5319         struct file *group_file = NULL;
5320         int event_fd;
5321         int fput_needed = 0;
5322         int err;
5323
5324         /* for future expandability... */
5325         if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
5326                 return -EINVAL;
5327
5328         err = perf_copy_attr(attr_uptr, &attr);
5329         if (err)
5330                 return err;
5331
5332         if (!attr.exclude_kernel) {
5333                 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
5334                         return -EACCES;
5335         }
5336
5337         if (attr.freq) {
5338                 if (attr.sample_freq > sysctl_perf_event_sample_rate)
5339                         return -EINVAL;
5340         }
5341
5342         event_fd = get_unused_fd_flags(O_RDWR);
5343         if (event_fd < 0)
5344                 return event_fd;
5345
5346         /*
5347          * Get the target context (task or percpu):
5348          */
5349         ctx = find_get_context(pid, cpu);
5350         if (IS_ERR(ctx)) {
5351                 err = PTR_ERR(ctx);
5352                 goto err_fd;
5353         }
5354
5355         if (group_fd != -1) {
5356                 group_leader = perf_fget_light(group_fd, &fput_needed);
5357                 if (IS_ERR(group_leader)) {
5358                         err = PTR_ERR(group_leader);
5359                         goto err_put_context;
5360                 }
5361                 group_file = group_leader->filp;
5362                 if (flags & PERF_FLAG_FD_OUTPUT)
5363                         output_event = group_leader;
5364                 if (flags & PERF_FLAG_FD_NO_GROUP)
5365                         group_leader = NULL;
5366         }
5367
5368         /*
5369          * Look up the group leader (we will attach this event to it):
5370          */
5371         if (group_leader) {
5372                 err = -EINVAL;
5373
5374                 /*
5375                  * Do not allow a recursive hierarchy (this new sibling
5376                  * becoming part of another group-sibling):
5377                  */
5378                 if (group_leader->group_leader != group_leader)
5379                         goto err_put_context;
5380                 /*
5381                  * Do not allow to attach to a group in a different
5382                  * task or CPU context:
5383                  */
5384                 if (group_leader->ctx != ctx)
5385                         goto err_put_context;
5386                 /*
5387                  * Only a group leader can be exclusive or pinned
5388                  */
5389                 if (attr.exclusive || attr.pinned)
5390                         goto err_put_context;
5391         }
5392
5393         event = perf_event_alloc(&attr, cpu, ctx, group_leader,
5394                                      NULL, NULL, GFP_KERNEL);
5395         if (IS_ERR(event)) {
5396                 err = PTR_ERR(event);
5397                 goto err_put_context;
5398         }
5399
5400         if (output_event) {
5401                 err = perf_event_set_output(event, output_event);
5402                 if (err)
5403                         goto err_free_put_context;
5404         }
5405
5406         event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
5407         if (IS_ERR(event_file)) {
5408                 err = PTR_ERR(event_file);
5409                 goto err_free_put_context;
5410         }
5411
5412         event->filp = event_file;
5413         WARN_ON_ONCE(ctx->parent_ctx);
5414         mutex_lock(&ctx->mutex);
5415         perf_install_in_context(ctx, event, cpu);
5416         ++ctx->generation;
5417         mutex_unlock(&ctx->mutex);
5418
5419         event->owner = current;
5420         get_task_struct(current);
5421         mutex_lock(&current->perf_event_mutex);
5422         list_add_tail(&event->owner_entry, &current->perf_event_list);
5423         mutex_unlock(&current->perf_event_mutex);
5424
5425         /*
5426          * Drop the reference on the group_event after placing the
5427          * new event on the sibling_list. This ensures destruction
5428          * of the group leader will find the pointer to itself in
5429          * perf_group_detach().
5430          */
5431         fput_light(group_file, fput_needed);
5432         fd_install(event_fd, event_file);
5433         return event_fd;
5434
5435 err_free_put_context:
5436         free_event(event);
5437 err_put_context:
5438         fput_light(group_file, fput_needed);
5439         put_ctx(ctx);
5440 err_fd:
5441         put_unused_fd(event_fd);
5442         return err;
5443 }
5444
5445 /**
5446  * perf_event_create_kernel_counter
5447  *
5448  * @attr: attributes of the counter to create
5449  * @cpu: cpu in which the counter is bound
5450  * @pid: task to profile
5451  */
5452 struct perf_event *
5453 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
5454                                  pid_t pid,
5455                                  perf_overflow_handler_t overflow_handler)
5456 {
5457         struct perf_event *event;
5458         struct perf_event_context *ctx;
5459         int err;
5460
5461         /*
5462          * Get the target context (task or percpu):
5463          */
5464
5465         ctx = find_get_context(pid, cpu);
5466         if (IS_ERR(ctx)) {
5467                 err = PTR_ERR(ctx);
5468                 goto err_exit;
5469         }
5470
5471         event = perf_event_alloc(attr, cpu, ctx, NULL,
5472                                  NULL, overflow_handler, GFP_KERNEL);
5473         if (IS_ERR(event)) {
5474                 err = PTR_ERR(event);
5475                 goto err_put_context;
5476         }
5477
5478         event->filp = NULL;
5479         WARN_ON_ONCE(ctx->parent_ctx);
5480         mutex_lock(&ctx->mutex);
5481         perf_install_in_context(ctx, event, cpu);
5482         ++ctx->generation;
5483         mutex_unlock(&ctx->mutex);
5484
5485         event->owner = current;
5486         get_task_struct(current);
5487         mutex_lock(&current->perf_event_mutex);
5488         list_add_tail(&event->owner_entry, &current->perf_event_list);
5489         mutex_unlock(&current->perf_event_mutex);
5490
5491         return event;
5492
5493  err_put_context:
5494         put_ctx(ctx);
5495  err_exit:
5496         return ERR_PTR(err);
5497 }
5498 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
5499
5500 /*
5501  * inherit a event from parent task to child task:
5502  */
5503 static struct perf_event *
5504 inherit_event(struct perf_event *parent_event,
5505               struct task_struct *parent,
5506               struct perf_event_context *parent_ctx,
5507               struct task_struct *child,
5508               struct perf_event *group_leader,
5509               struct perf_event_context *child_ctx)
5510 {
5511         struct perf_event *child_event;
5512
5513         /*
5514          * Instead of creating recursive hierarchies of events,
5515          * we link inherited events back to the original parent,
5516          * which has a filp for sure, which we use as the reference
5517          * count:
5518          */
5519         if (parent_event->parent)
5520                 parent_event = parent_event->parent;
5521
5522         child_event = perf_event_alloc(&parent_event->attr,
5523                                            parent_event->cpu, child_ctx,
5524                                            group_leader, parent_event,
5525                                            NULL, GFP_KERNEL);
5526         if (IS_ERR(child_event))
5527                 return child_event;
5528         get_ctx(child_ctx);
5529
5530         /*
5531          * Make the child state follow the state of the parent event,
5532          * not its attr.disabled bit.  We hold the parent's mutex,
5533          * so we won't race with perf_event_{en, dis}able_family.
5534          */
5535         if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
5536                 child_event->state = PERF_EVENT_STATE_INACTIVE;
5537         else
5538                 child_event->state = PERF_EVENT_STATE_OFF;
5539
5540         if (parent_event->attr.freq) {
5541                 u64 sample_period = parent_event->hw.sample_period;
5542                 struct hw_perf_event *hwc = &child_event->hw;
5543
5544                 hwc->sample_period = sample_period;
5545                 hwc->last_period   = sample_period;
5546
5547                 local64_set(&hwc->period_left, sample_period);
5548         }
5549
5550         child_event->overflow_handler = parent_event->overflow_handler;
5551
5552         /*
5553          * Link it up in the child's context:
5554          */
5555         add_event_to_ctx(child_event, child_ctx);
5556
5557         /*
5558          * Get a reference to the parent filp - we will fput it
5559          * when the child event exits. This is safe to do because
5560          * we are in the parent and we know that the filp still
5561          * exists and has a nonzero count:
5562          */
5563         atomic_long_inc(&parent_event->filp->f_count);
5564
5565         /*
5566          * Link this into the parent event's child list
5567          */
5568         WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5569         mutex_lock(&parent_event->child_mutex);
5570         list_add_tail(&child_event->child_list, &parent_event->child_list);
5571         mutex_unlock(&parent_event->child_mutex);
5572
5573         return child_event;
5574 }
5575
5576 static int inherit_group(struct perf_event *parent_event,
5577               struct task_struct *parent,
5578               struct perf_event_context *parent_ctx,
5579               struct task_struct *child,
5580               struct perf_event_context *child_ctx)
5581 {
5582         struct perf_event *leader;
5583         struct perf_event *sub;
5584         struct perf_event *child_ctr;
5585
5586         leader = inherit_event(parent_event, parent, parent_ctx,
5587                                  child, NULL, child_ctx);
5588         if (IS_ERR(leader))
5589                 return PTR_ERR(leader);
5590         list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
5591                 child_ctr = inherit_event(sub, parent, parent_ctx,
5592                                             child, leader, child_ctx);
5593                 if (IS_ERR(child_ctr))
5594                         return PTR_ERR(child_ctr);
5595         }
5596         return 0;
5597 }
5598
5599 static void sync_child_event(struct perf_event *child_event,
5600                                struct task_struct *child)
5601 {
5602         struct perf_event *parent_event = child_event->parent;
5603         u64 child_val;
5604
5605         if (child_event->attr.inherit_stat)
5606                 perf_event_read_event(child_event, child);
5607
5608         child_val = perf_event_count(child_event);
5609
5610         /*
5611          * Add back the child's count to the parent's count:
5612          */
5613         atomic64_add(child_val, &parent_event->child_count);
5614         atomic64_add(child_event->total_time_enabled,
5615                      &parent_event->child_total_time_enabled);
5616         atomic64_add(child_event->total_time_running,
5617                      &parent_event->child_total_time_running);
5618
5619         /*
5620          * Remove this event from the parent's list
5621          */
5622         WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5623         mutex_lock(&parent_event->child_mutex);
5624         list_del_init(&child_event->child_list);
5625         mutex_unlock(&parent_event->child_mutex);
5626
5627         /*
5628          * Release the parent event, if this was the last
5629          * reference to it.
5630          */
5631         fput(parent_event->filp);
5632 }
5633
5634 static void
5635 __perf_event_exit_task(struct perf_event *child_event,
5636                          struct perf_event_context *child_ctx,
5637                          struct task_struct *child)
5638 {
5639         struct perf_event *parent_event;
5640
5641         perf_event_remove_from_context(child_event);
5642
5643         parent_event = child_event->parent;
5644         /*
5645          * It can happen that parent exits first, and has events
5646          * that are still around due to the child reference. These
5647          * events need to be zapped - but otherwise linger.
5648          */
5649         if (parent_event) {
5650                 sync_child_event(child_event, child);
5651                 free_event(child_event);
5652         }
5653 }
5654
5655 /*
5656  * When a child task exits, feed back event values to parent events.
5657  */
5658 void perf_event_exit_task(struct task_struct *child)
5659 {
5660         struct perf_event *child_event, *tmp;
5661         struct perf_event_context *child_ctx;
5662         unsigned long flags;
5663
5664         if (likely(!child->perf_event_ctxp)) {
5665                 perf_event_task(child, NULL, 0);
5666                 return;
5667         }
5668
5669         local_irq_save(flags);
5670         /*
5671          * We can't reschedule here because interrupts are disabled,
5672          * and either child is current or it is a task that can't be
5673          * scheduled, so we are now safe from rescheduling changing
5674          * our context.
5675          */
5676         child_ctx = child->perf_event_ctxp;
5677         __perf_event_task_sched_out(child_ctx);
5678
5679         /*
5680          * Take the context lock here so that if find_get_context is
5681          * reading child->perf_event_ctxp, we wait until it has
5682          * incremented the context's refcount before we do put_ctx below.
5683          */
5684         raw_spin_lock(&child_ctx->lock);
5685         child->perf_event_ctxp = NULL;
5686         /*
5687          * If this context is a clone; unclone it so it can't get
5688          * swapped to another process while we're removing all
5689          * the events from it.
5690          */
5691         unclone_ctx(child_ctx);
5692         update_context_time(child_ctx);
5693         raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
5694
5695         /*
5696          * Report the task dead after unscheduling the events so that we
5697          * won't get any samples after PERF_RECORD_EXIT. We can however still
5698          * get a few PERF_RECORD_READ events.
5699          */
5700         perf_event_task(child, child_ctx, 0);
5701
5702         /*
5703          * We can recurse on the same lock type through:
5704          *
5705          *   __perf_event_exit_task()
5706          *     sync_child_event()
5707          *       fput(parent_event->filp)
5708          *         perf_release()
5709          *           mutex_lock(&ctx->mutex)
5710          *
5711          * But since its the parent context it won't be the same instance.
5712          */
5713         mutex_lock(&child_ctx->mutex);
5714
5715 again:
5716         list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5717                                  group_entry)
5718                 __perf_event_exit_task(child_event, child_ctx, child);
5719
5720         list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
5721                                  group_entry)
5722                 __perf_event_exit_task(child_event, child_ctx, child);
5723
5724         /*
5725          * If the last event was a group event, it will have appended all
5726          * its siblings to the list, but we obtained 'tmp' before that which
5727          * will still point to the list head terminating the iteration.
5728          */
5729         if (!list_empty(&child_ctx->pinned_groups) ||
5730             !list_empty(&child_ctx->flexible_groups))
5731                 goto again;
5732
5733         mutex_unlock(&child_ctx->mutex);
5734
5735         put_ctx(child_ctx);
5736 }
5737
5738 static void perf_free_event(struct perf_event *event,
5739                             struct perf_event_context *ctx)
5740 {
5741         struct perf_event *parent = event->parent;
5742
5743         if (WARN_ON_ONCE(!parent))
5744                 return;
5745
5746         mutex_lock(&parent->child_mutex);
5747         list_del_init(&event->child_list);
5748         mutex_unlock(&parent->child_mutex);
5749
5750         fput(parent->filp);
5751
5752         perf_group_detach(event);
5753         list_del_event(event, ctx);
5754         free_event(event);
5755 }
5756
5757 /*
5758  * free an unexposed, unused context as created by inheritance by
5759  * init_task below, used by fork() in case of fail.
5760  */
5761 void perf_event_free_task(struct task_struct *task)
5762 {
5763         struct perf_event_context *ctx = task->perf_event_ctxp;
5764         struct perf_event *event, *tmp;
5765
5766         if (!ctx)
5767                 return;
5768
5769         mutex_lock(&ctx->mutex);
5770 again:
5771         list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5772                 perf_free_event(event, ctx);
5773
5774         list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5775                                  group_entry)
5776                 perf_free_event(event, ctx);
5777
5778         if (!list_empty(&ctx->pinned_groups) ||
5779             !list_empty(&ctx->flexible_groups))
5780                 goto again;
5781
5782         mutex_unlock(&ctx->mutex);
5783
5784         put_ctx(ctx);
5785 }
5786
5787 static int
5788 inherit_task_group(struct perf_event *event, struct task_struct *parent,
5789                    struct perf_event_context *parent_ctx,
5790                    struct task_struct *child,
5791                    int *inherited_all)
5792 {
5793         int ret;
5794         struct perf_event_context *child_ctx = child->perf_event_ctxp;
5795
5796         if (!event->attr.inherit) {
5797                 *inherited_all = 0;
5798                 return 0;
5799         }
5800
5801         if (!child_ctx) {
5802                 /*
5803                  * This is executed from the parent task context, so
5804                  * inherit events that have been marked for cloning.
5805                  * First allocate and initialize a context for the
5806                  * child.
5807                  */
5808
5809                 child_ctx = kzalloc(sizeof(struct perf_event_context),
5810                                     GFP_KERNEL);
5811                 if (!child_ctx)
5812                         return -ENOMEM;
5813
5814                 __perf_event_init_context(child_ctx, child);
5815                 child->perf_event_ctxp = child_ctx;
5816                 get_task_struct(child);
5817         }
5818
5819         ret = inherit_group(event, parent, parent_ctx,
5820                             child, child_ctx);
5821
5822         if (ret)
5823                 *inherited_all = 0;
5824
5825         return ret;
5826 }
5827
5828
5829 /*
5830  * Initialize the perf_event context in task_struct
5831  */
5832 int perf_event_init_task(struct task_struct *child)
5833 {
5834         struct perf_event_context *child_ctx, *parent_ctx;
5835         struct perf_event_context *cloned_ctx;
5836         struct perf_event *event;
5837         struct task_struct *parent = current;
5838         int inherited_all = 1;
5839         int ret = 0;
5840
5841         child->perf_event_ctxp = NULL;
5842
5843         mutex_init(&child->perf_event_mutex);
5844         INIT_LIST_HEAD(&child->perf_event_list);
5845
5846         if (likely(!parent->perf_event_ctxp))
5847                 return 0;
5848
5849         /*
5850          * If the parent's context is a clone, pin it so it won't get
5851          * swapped under us.
5852          */
5853         parent_ctx = perf_pin_task_context(parent);
5854
5855         /*
5856          * No need to check if parent_ctx != NULL here; since we saw
5857          * it non-NULL earlier, the only reason for it to become NULL
5858          * is if we exit, and since we're currently in the middle of
5859          * a fork we can't be exiting at the same time.
5860          */
5861
5862         /*
5863          * Lock the parent list. No need to lock the child - not PID
5864          * hashed yet and not running, so nobody can access it.
5865          */
5866         mutex_lock(&parent_ctx->mutex);
5867
5868         /*
5869          * We dont have to disable NMIs - we are only looking at
5870          * the list, not manipulating it:
5871          */
5872         list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
5873                 ret = inherit_task_group(event, parent, parent_ctx, child,
5874                                          &inherited_all);
5875                 if (ret)
5876                         break;
5877         }
5878
5879         list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
5880                 ret = inherit_task_group(event, parent, parent_ctx, child,
5881                                          &inherited_all);
5882                 if (ret)
5883                         break;
5884         }
5885
5886         child_ctx = child->perf_event_ctxp;
5887
5888         if (child_ctx && inherited_all) {
5889                 /*
5890                  * Mark the child context as a clone of the parent
5891                  * context, or of whatever the parent is a clone of.
5892                  * Note that if the parent is a clone, it could get
5893                  * uncloned at any point, but that doesn't matter
5894                  * because the list of events and the generation
5895                  * count can't have changed since we took the mutex.
5896                  */
5897                 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
5898                 if (cloned_ctx) {
5899                         child_ctx->parent_ctx = cloned_ctx;
5900                         child_ctx->parent_gen = parent_ctx->parent_gen;
5901                 } else {
5902                         child_ctx->parent_ctx = parent_ctx;
5903                         child_ctx->parent_gen = parent_ctx->generation;
5904                 }
5905                 get_ctx(child_ctx->parent_ctx);
5906         }
5907
5908         mutex_unlock(&parent_ctx->mutex);
5909
5910         perf_unpin_context(parent_ctx);
5911
5912         return ret;
5913 }
5914
5915 static void __init perf_event_init_all_cpus(void)
5916 {
5917         int cpu;
5918         struct perf_cpu_context *cpuctx;
5919
5920         for_each_possible_cpu(cpu) {
5921                 cpuctx = &per_cpu(perf_cpu_context, cpu);
5922                 mutex_init(&cpuctx->hlist_mutex);
5923                 __perf_event_init_context(&cpuctx->ctx, NULL);
5924         }
5925 }
5926
5927 static void __cpuinit perf_event_init_cpu(int cpu)
5928 {
5929         struct perf_cpu_context *cpuctx;
5930
5931         cpuctx = &per_cpu(perf_cpu_context, cpu);
5932
5933         spin_lock(&perf_resource_lock);
5934         cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5935         spin_unlock(&perf_resource_lock);
5936
5937         mutex_lock(&cpuctx->hlist_mutex);
5938         if (cpuctx->hlist_refcount > 0) {
5939                 struct swevent_hlist *hlist;
5940
5941                 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5942                 WARN_ON_ONCE(!hlist);
5943                 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
5944         }
5945         mutex_unlock(&cpuctx->hlist_mutex);
5946 }
5947
5948 #ifdef CONFIG_HOTPLUG_CPU
5949 static void __perf_event_exit_cpu(void *info)
5950 {
5951         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
5952         struct perf_event_context *ctx = &cpuctx->ctx;
5953         struct perf_event *event, *tmp;
5954
5955         list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5956                 __perf_event_remove_from_context(event);
5957         list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
5958                 __perf_event_remove_from_context(event);
5959 }
5960 static void perf_event_exit_cpu(int cpu)
5961 {
5962         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
5963         struct perf_event_context *ctx = &cpuctx->ctx;
5964
5965         mutex_lock(&cpuctx->hlist_mutex);
5966         swevent_hlist_release(cpuctx);
5967         mutex_unlock(&cpuctx->hlist_mutex);
5968
5969         mutex_lock(&ctx->mutex);
5970         smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
5971         mutex_unlock(&ctx->mutex);
5972 }
5973 #else
5974 static inline void perf_event_exit_cpu(int cpu) { }
5975 #endif
5976
5977 static int __cpuinit
5978 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5979 {
5980         unsigned int cpu = (long)hcpu;
5981
5982         switch (action & ~CPU_TASKS_FROZEN) {
5983
5984         case CPU_UP_PREPARE:
5985         case CPU_DOWN_FAILED:
5986                 perf_event_init_cpu(cpu);
5987                 break;
5988
5989         case CPU_UP_CANCELED:
5990         case CPU_DOWN_PREPARE:
5991                 perf_event_exit_cpu(cpu);
5992                 break;
5993
5994         default:
5995                 break;
5996         }
5997
5998         return NOTIFY_OK;
5999 }
6000
6001 void __init perf_event_init(void)
6002 {
6003         perf_event_init_all_cpus();
6004         init_srcu_struct(&pmus_srcu);
6005         perf_pmu_register(&perf_swevent);
6006         perf_pmu_register(&perf_cpu_clock);
6007         perf_pmu_register(&perf_task_clock);
6008         perf_tp_register();
6009         perf_cpu_notifier(perf_cpu_notify);
6010 }
6011
6012 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
6013                                         struct sysdev_class_attribute *attr,
6014                                         char *buf)
6015 {
6016         return sprintf(buf, "%d\n", perf_reserved_percpu);
6017 }
6018
6019 static ssize_t
6020 perf_set_reserve_percpu(struct sysdev_class *class,
6021                         struct sysdev_class_attribute *attr,
6022                         const char *buf,
6023                         size_t count)
6024 {
6025         struct perf_cpu_context *cpuctx;
6026         unsigned long val;
6027         int err, cpu, mpt;
6028
6029         err = strict_strtoul(buf, 10, &val);
6030         if (err)
6031                 return err;
6032         if (val > perf_max_events)
6033                 return -EINVAL;
6034
6035         spin_lock(&perf_resource_lock);
6036         perf_reserved_percpu = val;
6037         for_each_online_cpu(cpu) {
6038                 cpuctx = &per_cpu(perf_cpu_context, cpu);
6039                 raw_spin_lock_irq(&cpuctx->ctx.lock);
6040                 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
6041                           perf_max_events - perf_reserved_percpu);
6042                 cpuctx->max_pertask = mpt;
6043                 raw_spin_unlock_irq(&cpuctx->ctx.lock);
6044         }
6045         spin_unlock(&perf_resource_lock);
6046
6047         return count;
6048 }
6049
6050 static ssize_t perf_show_overcommit(struct sysdev_class *class,
6051                                     struct sysdev_class_attribute *attr,
6052                                     char *buf)
6053 {
6054         return sprintf(buf, "%d\n", perf_overcommit);
6055 }
6056
6057 static ssize_t
6058 perf_set_overcommit(struct sysdev_class *class,
6059                     struct sysdev_class_attribute *attr,
6060                     const char *buf, size_t count)
6061 {
6062         unsigned long val;
6063         int err;
6064
6065         err = strict_strtoul(buf, 10, &val);
6066         if (err)
6067                 return err;
6068         if (val > 1)
6069                 return -EINVAL;
6070
6071         spin_lock(&perf_resource_lock);
6072         perf_overcommit = val;
6073         spin_unlock(&perf_resource_lock);
6074
6075         return count;
6076 }
6077
6078 static SYSDEV_CLASS_ATTR(
6079                                 reserve_percpu,
6080                                 0644,
6081                                 perf_show_reserve_percpu,
6082                                 perf_set_reserve_percpu
6083                         );
6084
6085 static SYSDEV_CLASS_ATTR(
6086                                 overcommit,
6087                                 0644,
6088                                 perf_show_overcommit,
6089                                 perf_set_overcommit
6090                         );
6091
6092 static struct attribute *perfclass_attrs[] = {
6093         &attr_reserve_percpu.attr,
6094         &attr_overcommit.attr,
6095         NULL
6096 };
6097
6098 static struct attribute_group perfclass_attr_group = {
6099         .attrs                  = perfclass_attrs,
6100         .name                   = "perf_events",
6101 };
6102
6103 static int __init perf_event_sysfs_init(void)
6104 {
6105         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
6106                                   &perfclass_attr_group);
6107 }
6108 device_initcall(perf_event_sysfs_init);