perf_counter: Close race in perf_lock_task_context()
1 /*
2  * Performance counter core code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  *  For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/hardirq.h>
24 #include <linux/rculist.h>
25 #include <linux/uaccess.h>
26 #include <linux/syscalls.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/perf_counter.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34  * Each CPU has a list of per CPU counters:
35  */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_counters __read_mostly;
44 static atomic_t nr_comm_counters __read_mostly;
45
46 /*
47  * perf counter paranoia level:
48  *  0 - not paranoid
49  *  1 - disallow CPU counters for unprivileged users
50  *  2 - disallow kernel profiling for unprivileged users
51  */
52 int sysctl_perf_counter_paranoid __read_mostly;
53
54 static inline bool perf_paranoid_cpu(void)
55 {
56         return sysctl_perf_counter_paranoid > 0;
57 }
58
59 static inline bool perf_paranoid_kernel(void)
60 {
61         return sysctl_perf_counter_paranoid > 1;
62 }
63
64 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
65
66 /*
67  * max perf counter sample rate
68  */
69 int sysctl_perf_counter_sample_rate __read_mostly = 100000;
70
71 static atomic64_t perf_counter_id;
72
73 /*
74  * Lock for (sysadmin-configurable) counter reservations:
75  */
76 static DEFINE_SPINLOCK(perf_resource_lock);
77
78 /*
79  * Architecture provided APIs - weak aliases:
80  */
81 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
82 {
83         return NULL;
84 }
85
86 void __weak hw_perf_disable(void)               { barrier(); }
87 void __weak hw_perf_enable(void)                { barrier(); }
88
89 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
90
91 int __weak
92 hw_perf_group_sched_in(struct perf_counter *group_leader,
93                struct perf_cpu_context *cpuctx,
94                struct perf_counter_context *ctx, int cpu)
95 {
96         return 0;
97 }
98
99 void __weak perf_counter_print_debug(void)      { }
100
101 static DEFINE_PER_CPU(int, disable_count);
102
103 void __perf_disable(void)
104 {
105         __get_cpu_var(disable_count)++;
106 }
107
108 bool __perf_enable(void)
109 {
110         return !--__get_cpu_var(disable_count);
111 }
112
113 void perf_disable(void)
114 {
115         __perf_disable();
116         hw_perf_disable();
117 }
118
119 void perf_enable(void)
120 {
121         if (__perf_enable())
122                 hw_perf_enable();
123 }
124
125 static void get_ctx(struct perf_counter_context *ctx)
126 {
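        /*
         * A new reference may only be taken while the count is still
         * non-zero; warn if someone tries to resurrect a context that
         * is already on its way to being freed.
         */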
127         WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
128 }
129
130 static void free_ctx(struct rcu_head *head)
131 {
132         struct perf_counter_context *ctx;
133
134         ctx = container_of(head, struct perf_counter_context, rcu_head);
135         kfree(ctx);
136 }
137
138 static void put_ctx(struct perf_counter_context *ctx)
139 {
140         if (atomic_dec_and_test(&ctx->refcount)) {
141                 if (ctx->parent_ctx)
142                         put_ctx(ctx->parent_ctx);
143                 if (ctx->task)
144                         put_task_struct(ctx->task);
145                 call_rcu(&ctx->rcu_head, free_ctx);
146         }
147 }
148
149 /*
150  * Get the perf_counter_context for a task and lock it.
151  * This has to cope with the fact that until it is locked,
152  * the context could get moved to another task.
153  */
154 static struct perf_counter_context *
155 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
156 {
157         struct perf_counter_context *ctx;
158
159         rcu_read_lock();
160  retry:
161         ctx = rcu_dereference(task->perf_counter_ctxp);
162         if (ctx) {
163                 /*
164                  * If this context is a clone of another, it might
165                  * get swapped for another underneath us by
166                  * perf_counter_task_sched_out, though the
167                  * rcu_read_lock() protects us from any context
168                  * getting freed.  Lock the context and check if it
169                  * got swapped before we could get the lock, and retry
170                  * if so.  If we locked the right context, then it
171                  * can't get swapped on us any more.
172                  */
173                 spin_lock_irqsave(&ctx->lock, *flags);
174                 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
175                         spin_unlock_irqrestore(&ctx->lock, *flags);
176                         goto retry;
177                 }
178
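                /*
                 * Take a reference while we still hold ctx->lock, so the
                 * context cannot be freed between rcu_read_unlock() and
                 * the caller using it.  A refcount that has already
                 * dropped to zero means the context is being freed, so
                 * report it as not found.
                 */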
179                 if (!atomic_inc_not_zero(&ctx->refcount)) {
180                         spin_unlock_irqrestore(&ctx->lock, *flags);
181                         ctx = NULL;
182                 }
183         }
184         rcu_read_unlock();
185         return ctx;
186 }
187
188 /*
189  * Get the context for a task and increment its pin_count so it
190  * can't get swapped to another task.  This also increments its
191  * reference count so that the context can't get freed.
192  */
193 static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
194 {
195         struct perf_counter_context *ctx;
196         unsigned long flags;
197
198         ctx = perf_lock_task_context(task, &flags);
199         if (ctx) {
200                 ++ctx->pin_count;
201                 spin_unlock_irqrestore(&ctx->lock, flags);
202         }
203         return ctx;
204 }
205
206 static void perf_unpin_context(struct perf_counter_context *ctx)
207 {
208         unsigned long flags;
209
210         spin_lock_irqsave(&ctx->lock, flags);
211         --ctx->pin_count;
212         spin_unlock_irqrestore(&ctx->lock, flags);
213         put_ctx(ctx);
214 }
215
216 /*
217  * Add a counter to the lists for its context.
218  * Must be called with ctx->mutex and ctx->lock held.
219  */
220 static void
221 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
222 {
223         struct perf_counter *group_leader = counter->group_leader;
224
225         /*
226          * Depending on whether it is a standalone or sibling counter,
227          * add it straight to the context's counter list, or to the group
228          * leader's sibling list:
229          */
230         if (group_leader == counter)
231                 list_add_tail(&counter->list_entry, &ctx->counter_list);
232         else {
233                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
234                 group_leader->nr_siblings++;
235         }
236
237         list_add_rcu(&counter->event_entry, &ctx->event_list);
238         ctx->nr_counters++;
239 }
240
241 /*
242  * Remove a counter from the lists for its context.
243  * Must be called with ctx->mutex and ctx->lock held.
244  */
245 static void
246 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
247 {
248         struct perf_counter *sibling, *tmp;
249
250         if (list_empty(&counter->list_entry))
251                 return;
252         ctx->nr_counters--;
253
254         list_del_init(&counter->list_entry);
255         list_del_rcu(&counter->event_entry);
256
257         if (counter->group_leader != counter)
258                 counter->group_leader->nr_siblings--;
259
260         /*
261          * If this was a group counter with sibling counters then
262          * upgrade the siblings to singleton counters by adding them
263          * to the context list directly:
264          */
265         list_for_each_entry_safe(sibling, tmp,
266                                  &counter->sibling_list, list_entry) {
267
268                 list_move_tail(&sibling->list_entry, &ctx->counter_list);
269                 sibling->group_leader = sibling;
270         }
271 }
272
273 static void
274 counter_sched_out(struct perf_counter *counter,
275                   struct perf_cpu_context *cpuctx,
276                   struct perf_counter_context *ctx)
277 {
278         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
279                 return;
280
281         counter->state = PERF_COUNTER_STATE_INACTIVE;
282         counter->tstamp_stopped = ctx->time;
283         counter->pmu->disable(counter);
284         counter->oncpu = -1;
285
286         if (!is_software_counter(counter))
287                 cpuctx->active_oncpu--;
288         ctx->nr_active--;
289         if (counter->attr.exclusive || !cpuctx->active_oncpu)
290                 cpuctx->exclusive = 0;
291 }
292
293 static void
294 group_sched_out(struct perf_counter *group_counter,
295                 struct perf_cpu_context *cpuctx,
296                 struct perf_counter_context *ctx)
297 {
298         struct perf_counter *counter;
299
300         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
301                 return;
302
303         counter_sched_out(group_counter, cpuctx, ctx);
304
305         /*
306          * Schedule out siblings (if any):
307          */
308         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
309                 counter_sched_out(counter, cpuctx, ctx);
310
311         if (group_counter->attr.exclusive)
312                 cpuctx->exclusive = 0;
313 }
314
315 /*
316  * Cross CPU call to remove a performance counter
317  *
318  * We disable the counter on the hardware level first. After that we
319  * remove it from the context list.
320  */
321 static void __perf_counter_remove_from_context(void *info)
322 {
323         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
324         struct perf_counter *counter = info;
325         struct perf_counter_context *ctx = counter->ctx;
326
327         /*
328          * If this is a task context, we need to check whether it is
329          * the current task context of this cpu. If not it has been
330          * scheduled out before the smp call arrived.
331          */
332         if (ctx->task && cpuctx->task_ctx != ctx)
333                 return;
334
335         spin_lock(&ctx->lock);
336         /*
337          * Protect the list operation against NMI by disabling the
338          * counters on a global level.
339          */
340         perf_disable();
341
342         counter_sched_out(counter, cpuctx, ctx);
343
344         list_del_counter(counter, ctx);
345
346         if (!ctx->task) {
347                 /*
348                  * Allow more per task counters with respect to the
349                  * reservation:
350                  */
351                 cpuctx->max_pertask =
352                         min(perf_max_counters - ctx->nr_counters,
353                             perf_max_counters - perf_reserved_percpu);
354         }
355
356         perf_enable();
357         spin_unlock(&ctx->lock);
358 }
359
360
361 /*
362  * Remove the counter from a task's (or a CPU's) list of counters.
363  *
364  * Must be called with ctx->mutex held.
365  *
366  * CPU counters are removed with a smp call. For task counters we only
367  * call when the task is on a CPU.
368  *
369  * If counter->ctx is a cloned context, callers must make sure that
370  * every task struct that counter->ctx->task could possibly point to
371  * remains valid.  This is OK when called from perf_release since
372  * that only calls us on the top-level context, which can't be a clone.
373  * When called from perf_counter_exit_task, it's OK because the
374  * context has been detached from its task.
375  */
376 static void perf_counter_remove_from_context(struct perf_counter *counter)
377 {
378         struct perf_counter_context *ctx = counter->ctx;
379         struct task_struct *task = ctx->task;
380
381         if (!task) {
382                 /*
383                  * Per cpu counters are removed via an smp call and
384          * the removal is always successful.
385                  */
386                 smp_call_function_single(counter->cpu,
387                                          __perf_counter_remove_from_context,
388                                          counter, 1);
389                 return;
390         }
391
392 retry:
393         task_oncpu_function_call(task, __perf_counter_remove_from_context,
394                                  counter);
395
396         spin_lock_irq(&ctx->lock);
397         /*
398          * If the context is active we need to retry the smp call.
399          */
400         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
401                 spin_unlock_irq(&ctx->lock);
402                 goto retry;
403         }
404
405         /*
406          * The lock prevents this context from being scheduled in, so
407          * we can safely remove the counter if the call above did not
408          * succeed.
409          */
410         if (!list_empty(&counter->list_entry)) {
411                 list_del_counter(counter, ctx);
412         }
413         spin_unlock_irq(&ctx->lock);
414 }
415
416 static inline u64 perf_clock(void)
417 {
418         return cpu_clock(smp_processor_id());
419 }
420
421 /*
422  * Update the record of the current time in a context.
423  */
424 static void update_context_time(struct perf_counter_context *ctx)
425 {
426         u64 now = perf_clock();
427
428         ctx->time += now - ctx->timestamp;
429         ctx->timestamp = now;
430 }
431
432 /*
433  * Update the total_time_enabled and total_time_running fields for a counter.
434  */
435 static void update_counter_times(struct perf_counter *counter)
436 {
437         struct perf_counter_context *ctx = counter->ctx;
438         u64 run_end;
439
440         if (counter->state < PERF_COUNTER_STATE_INACTIVE)
441                 return;
442
443         counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
444
445         if (counter->state == PERF_COUNTER_STATE_INACTIVE)
446                 run_end = counter->tstamp_stopped;
447         else
448                 run_end = ctx->time;
449
450         counter->total_time_running = run_end - counter->tstamp_running;
451 }
452
453 /*
454  * Update total_time_enabled and total_time_running for all counters in a group.
455  */
456 static void update_group_times(struct perf_counter *leader)
457 {
458         struct perf_counter *counter;
459
460         update_counter_times(leader);
461         list_for_each_entry(counter, &leader->sibling_list, list_entry)
462                 update_counter_times(counter);
463 }
464
465 /*
466  * Cross CPU call to disable a performance counter
467  */
468 static void __perf_counter_disable(void *info)
469 {
470         struct perf_counter *counter = info;
471         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
472         struct perf_counter_context *ctx = counter->ctx;
473
474         /*
475          * If this is a per-task counter, need to check whether this
476          * counter's task is the current task on this cpu.
477          */
478         if (ctx->task && cpuctx->task_ctx != ctx)
479                 return;
480
481         spin_lock(&ctx->lock);
482
483         /*
484          * If the counter is on, turn it off.
485          * If it is in error state, leave it in error state.
486          */
487         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
488                 update_context_time(ctx);
489                 update_counter_times(counter);
490                 if (counter == counter->group_leader)
491                         group_sched_out(counter, cpuctx, ctx);
492                 else
493                         counter_sched_out(counter, cpuctx, ctx);
494                 counter->state = PERF_COUNTER_STATE_OFF;
495         }
496
497         spin_unlock(&ctx->lock);
498 }
499
500 /*
501  * Disable a counter.
502  *
503  * If counter->ctx is a cloned context, callers must make sure that
504  * every task struct that counter->ctx->task could possibly point to
505  * remains valid.  This condition is satisfied when called through
506  * perf_counter_for_each_child or perf_counter_for_each because they
507  * hold the top-level counter's child_mutex, so any descendant that
508  * goes to exit will block in sync_child_counter.
509  * When called from perf_pending_counter it's OK because counter->ctx
510  * is the current context on this CPU and preemption is disabled,
511  * hence we can't get into perf_counter_task_sched_out for this context.
512  */
513 static void perf_counter_disable(struct perf_counter *counter)
514 {
515         struct perf_counter_context *ctx = counter->ctx;
516         struct task_struct *task = ctx->task;
517
518         if (!task) {
519                 /*
520                  * Disable the counter on the cpu that it's on
521                  */
522                 smp_call_function_single(counter->cpu, __perf_counter_disable,
523                                          counter, 1);
524                 return;
525         }
526
527  retry:
528         task_oncpu_function_call(task, __perf_counter_disable, counter);
529
530         spin_lock_irq(&ctx->lock);
531         /*
532          * If the counter is still active, we need to retry the cross-call.
533          */
534         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
535                 spin_unlock_irq(&ctx->lock);
536                 goto retry;
537         }
538
539         /*
540          * Since we have the lock this context can't be scheduled
541          * in, so we can change the state safely.
542          */
543         if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
544                 update_counter_times(counter);
545                 counter->state = PERF_COUNTER_STATE_OFF;
546         }
547
548         spin_unlock_irq(&ctx->lock);
549 }
550
551 static int
552 counter_sched_in(struct perf_counter *counter,
553                  struct perf_cpu_context *cpuctx,
554                  struct perf_counter_context *ctx,
555                  int cpu)
556 {
557         if (counter->state <= PERF_COUNTER_STATE_OFF)
558                 return 0;
559
560         counter->state = PERF_COUNTER_STATE_ACTIVE;
561         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
562         /*
563          * The new state must be visible before we turn it on in the hardware:
564          */
565         smp_wmb();
566
567         if (counter->pmu->enable(counter)) {
568                 counter->state = PERF_COUNTER_STATE_INACTIVE;
569                 counter->oncpu = -1;
570                 return -EAGAIN;
571         }
572
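        /*
         * Advance tstamp_running past the interval we were scheduled
         * out, so that stopped time is not accounted as running time.
         */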
573         counter->tstamp_running += ctx->time - counter->tstamp_stopped;
574
575         if (!is_software_counter(counter))
576                 cpuctx->active_oncpu++;
577         ctx->nr_active++;
578
579         if (counter->attr.exclusive)
580                 cpuctx->exclusive = 1;
581
582         return 0;
583 }
584
585 static int
586 group_sched_in(struct perf_counter *group_counter,
587                struct perf_cpu_context *cpuctx,
588                struct perf_counter_context *ctx,
589                int cpu)
590 {
591         struct perf_counter *counter, *partial_group;
592         int ret;
593
594         if (group_counter->state == PERF_COUNTER_STATE_OFF)
595                 return 0;
596
597         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
598         if (ret)
599                 return ret < 0 ? ret : 0;
600
601         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
602                 return -EAGAIN;
603
604         /*
605          * Schedule in siblings as one group (if any):
606          */
607         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
608                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
609                         partial_group = counter;
610                         goto group_error;
611                 }
612         }
613
614         return 0;
615
616 group_error:
617         /*
618          * Groups can be scheduled in as one unit only, so undo any
619          * partial group before returning:
620          */
621         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
622                 if (counter == partial_group)
623                         break;
624                 counter_sched_out(counter, cpuctx, ctx);
625         }
626         counter_sched_out(group_counter, cpuctx, ctx);
627
628         return -EAGAIN;
629 }
630
631 /*
632  * Return 1 for a group consisting entirely of software counters,
633  * 0 if the group contains any hardware counters.
634  */
635 static int is_software_only_group(struct perf_counter *leader)
636 {
637         struct perf_counter *counter;
638
639         if (!is_software_counter(leader))
640                 return 0;
641
642         list_for_each_entry(counter, &leader->sibling_list, list_entry)
643                 if (!is_software_counter(counter))
644                         return 0;
645
646         return 1;
647 }
648
649 /*
650  * Work out whether we can put this counter group on the CPU now.
651  */
652 static int group_can_go_on(struct perf_counter *counter,
653                            struct perf_cpu_context *cpuctx,
654                            int can_add_hw)
655 {
656         /*
657          * Groups consisting entirely of software counters can always go on.
658          */
659         if (is_software_only_group(counter))
660                 return 1;
661         /*
662          * If an exclusive group is already on, no other hardware
663          * counters can go on.
664          */
665         if (cpuctx->exclusive)
666                 return 0;
667         /*
668          * If this group is exclusive and there are already
669          * counters on the CPU, it can't go on.
670          */
671         if (counter->attr.exclusive && cpuctx->active_oncpu)
672                 return 0;
673         /*
674          * Otherwise, try to add it if all previous groups were able
675          * to go on.
676          */
677         return can_add_hw;
678 }
679
680 static void add_counter_to_ctx(struct perf_counter *counter,
681                                struct perf_counter_context *ctx)
682 {
683         list_add_counter(counter, ctx);
684         counter->tstamp_enabled = ctx->time;
685         counter->tstamp_running = ctx->time;
686         counter->tstamp_stopped = ctx->time;
687 }
688
689 /*
690  * Cross CPU call to install and enable a performance counter
691  *
692  * Must be called with ctx->mutex held
693  */
694 static void __perf_install_in_context(void *info)
695 {
696         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
697         struct perf_counter *counter = info;
698         struct perf_counter_context *ctx = counter->ctx;
699         struct perf_counter *leader = counter->group_leader;
700         int cpu = smp_processor_id();
701         int err;
702
703         /*
704          * If this is a task context, we need to check whether it is
705          * the current task context of this cpu. If not it has been
706          * scheduled out before the smp call arrived.
707          * Or possibly this is the right context but it isn't
708          * on this cpu because it had no counters.
709          */
710         if (ctx->task && cpuctx->task_ctx != ctx) {
711                 if (cpuctx->task_ctx || ctx->task != current)
712                         return;
713                 cpuctx->task_ctx = ctx;
714         }
715
716         spin_lock(&ctx->lock);
717         ctx->is_active = 1;
718         update_context_time(ctx);
719
720         /*
721          * Protect the list operation against NMI by disabling the
722          * counters on a global level. NOP for non NMI based counters.
723          */
724         perf_disable();
725
726         add_counter_to_ctx(counter, ctx);
727
728         /*
729          * Don't put the counter on if it is disabled or if
730          * it is in a group and the group isn't on.
731          */
732         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
733             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
734                 goto unlock;
735
736         /*
737          * An exclusive counter can't go on if there are already active
738          * hardware counters, and no hardware counter can go on if there
739          * is already an exclusive counter on.
740          */
741         if (!group_can_go_on(counter, cpuctx, 1))
742                 err = -EEXIST;
743         else
744                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
745
746         if (err) {
747                 /*
748                  * This counter couldn't go on.  If it is in a group
749                  * then we have to pull the whole group off.
750                  * If the counter group is pinned then put it in error state.
751                  */
752                 if (leader != counter)
753                         group_sched_out(leader, cpuctx, ctx);
754                 if (leader->attr.pinned) {
755                         update_group_times(leader);
756                         leader->state = PERF_COUNTER_STATE_ERROR;
757                 }
758         }
759
760         if (!err && !ctx->task && cpuctx->max_pertask)
761                 cpuctx->max_pertask--;
762
763  unlock:
764         perf_enable();
765
766         spin_unlock(&ctx->lock);
767 }
768
769 /*
770  * Attach a performance counter to a context
771  *
772  * First we add the counter to the list with the hardware enable bit
773  * in counter->hw_config cleared.
774  *
775  * If the counter is attached to a task which is on a CPU we use a smp
776  * call to enable it in the task context. The task might have been
777  * scheduled away, but we check this in the smp call again.
778  *
779  * Must be called with ctx->mutex held.
780  */
781 static void
782 perf_install_in_context(struct perf_counter_context *ctx,
783                         struct perf_counter *counter,
784                         int cpu)
785 {
786         struct task_struct *task = ctx->task;
787
788         if (!task) {
789                 /*
790                  * Per cpu counters are installed via an smp call and
791          * the install is always successful.
792                  */
793                 smp_call_function_single(cpu, __perf_install_in_context,
794                                          counter, 1);
795                 return;
796         }
797
798 retry:
799         task_oncpu_function_call(task, __perf_install_in_context,
800                                  counter);
801
802         spin_lock_irq(&ctx->lock);
803         /*
804          * If the context is active, we need to retry the smp call.
805          */
806         if (ctx->is_active && list_empty(&counter->list_entry)) {
807                 spin_unlock_irq(&ctx->lock);
808                 goto retry;
809         }
810
811         /*
812          * The lock prevents this context from being scheduled in, so
813          * we can safely add the counter if the call above did not
814          * succeed.
815          */
816         if (list_empty(&counter->list_entry))
817                 add_counter_to_ctx(counter, ctx);
818         spin_unlock_irq(&ctx->lock);
819 }
820
821 /*
822  * Cross CPU call to enable a performance counter
823  */
824 static void __perf_counter_enable(void *info)
825 {
826         struct perf_counter *counter = info;
827         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
828         struct perf_counter_context *ctx = counter->ctx;
829         struct perf_counter *leader = counter->group_leader;
830         int err;
831
832         /*
833          * If this is a per-task counter, need to check whether this
834          * counter's task is the current task on this cpu.
835          */
836         if (ctx->task && cpuctx->task_ctx != ctx) {
837                 if (cpuctx->task_ctx || ctx->task != current)
838                         return;
839                 cpuctx->task_ctx = ctx;
840         }
841
842         spin_lock(&ctx->lock);
843         ctx->is_active = 1;
844         update_context_time(ctx);
845
846         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
847                 goto unlock;
848         counter->state = PERF_COUNTER_STATE_INACTIVE;
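        /*
         * Back-date tstamp_enabled so that the time already accumulated
         * in total_time_enabled is preserved across the re-enable.
         */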
849         counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
850
851         /*
852          * If the counter is in a group and isn't the group leader,
853          * then don't put it on unless the group is on.
854          */
855         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
856                 goto unlock;
857
858         if (!group_can_go_on(counter, cpuctx, 1)) {
859                 err = -EEXIST;
860         } else {
861                 perf_disable();
862                 if (counter == leader)
863                         err = group_sched_in(counter, cpuctx, ctx,
864                                              smp_processor_id());
865                 else
866                         err = counter_sched_in(counter, cpuctx, ctx,
867                                                smp_processor_id());
868                 perf_enable();
869         }
870
871         if (err) {
872                 /*
873                  * If this counter can't go on and it's part of a
874                  * group, then the whole group has to come off.
875                  */
876                 if (leader != counter)
877                         group_sched_out(leader, cpuctx, ctx);
878                 if (leader->attr.pinned) {
879                         update_group_times(leader);
880                         leader->state = PERF_COUNTER_STATE_ERROR;
881                 }
882         }
883
884  unlock:
885         spin_unlock(&ctx->lock);
886 }
887
888 /*
889  * Enable a counter.
890  *
891  * If counter->ctx is a cloned context, callers must make sure that
892  * every task struct that counter->ctx->task could possibly point to
893  * remains valid.  This condition is satisfied when called through
894  * perf_counter_for_each_child or perf_counter_for_each as described
895  * for perf_counter_disable.
896  */
897 static void perf_counter_enable(struct perf_counter *counter)
898 {
899         struct perf_counter_context *ctx = counter->ctx;
900         struct task_struct *task = ctx->task;
901
902         if (!task) {
903                 /*
904                  * Enable the counter on the cpu that it's on
905                  */
906                 smp_call_function_single(counter->cpu, __perf_counter_enable,
907                                          counter, 1);
908                 return;
909         }
910
911         spin_lock_irq(&ctx->lock);
912         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
913                 goto out;
914
915         /*
916          * If the counter is in error state, clear that first.
917          * That way, if we see the counter in error state below, we
918          * know that it has gone back into error state, as distinct
919          * from the task having been scheduled away before the
920          * cross-call arrived.
921          */
922         if (counter->state == PERF_COUNTER_STATE_ERROR)
923                 counter->state = PERF_COUNTER_STATE_OFF;
924
925  retry:
926         spin_unlock_irq(&ctx->lock);
927         task_oncpu_function_call(task, __perf_counter_enable, counter);
928
929         spin_lock_irq(&ctx->lock);
930
931         /*
932          * If the context is active and the counter is still off,
933          * we need to retry the cross-call.
934          */
935         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
936                 goto retry;
937
938         /*
939          * Since we have the lock this context can't be scheduled
940          * in, so we can change the state safely.
941          */
942         if (counter->state == PERF_COUNTER_STATE_OFF) {
943                 counter->state = PERF_COUNTER_STATE_INACTIVE;
944                 counter->tstamp_enabled =
945                         ctx->time - counter->total_time_enabled;
946         }
947  out:
948         spin_unlock_irq(&ctx->lock);
949 }
950
951 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
952 {
953         /*
954          * not supported on inherited counters
955          */
956         if (counter->attr.inherit)
957                 return -EINVAL;
958
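        /*
         * Permit 'refresh' more overflow events before the counter
         * disables itself again, then turn it back on.
         */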
959         atomic_add(refresh, &counter->event_limit);
960         perf_counter_enable(counter);
961
962         return 0;
963 }
964
965 void __perf_counter_sched_out(struct perf_counter_context *ctx,
966                               struct perf_cpu_context *cpuctx)
967 {
968         struct perf_counter *counter;
969
970         spin_lock(&ctx->lock);
971         ctx->is_active = 0;
972         if (likely(!ctx->nr_counters))
973                 goto out;
974         update_context_time(ctx);
975
976         perf_disable();
977         if (ctx->nr_active) {
978                 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
979                         if (counter != counter->group_leader)
980                                 counter_sched_out(counter, cpuctx, ctx);
981                         else
982                                 group_sched_out(counter, cpuctx, ctx);
983                 }
984         }
985         perf_enable();
986  out:
987         spin_unlock(&ctx->lock);
988 }
989
990 /*
991  * Test whether two contexts are equivalent, i.e. whether they
992  * have both been cloned from the same version of the same context
993  * and they both have the same number of enabled counters.
994  * If the number of enabled counters is the same, then the set
995  * of enabled counters should be the same, because these are both
996  * inherited contexts, therefore we can't access individual counters
997  * in them directly with an fd; we can only enable/disable all
998  * counters via prctl, or enable/disable all counters in a family
999  * via ioctl, which will have the same effect on both contexts.
1000  */
1001 static int context_equiv(struct perf_counter_context *ctx1,
1002                          struct perf_counter_context *ctx2)
1003 {
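        /*
         * Contexts with a non-zero pin_count are in use via
         * perf_pin_task_context() and must not be swapped to another
         * task, so never treat them as equivalent.
         */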
1004         return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1005                 && ctx1->parent_gen == ctx2->parent_gen
1006                 && !ctx1->pin_count && !ctx2->pin_count;
1007 }
1008
1009 /*
1010  * Called from scheduler to remove the counters of the current task,
1011  * with interrupts disabled.
1012  *
1013  * We stop each counter and update the counter value in counter->count.
1014  *
1015  * This does not protect us against NMI, but disable()
1016  * sets the disabled bit in the control field of counter _before_
1017  * accessing the counter control register. If a NMI hits, then it will
1018  * not restart the counter.
1019  */
1020 void perf_counter_task_sched_out(struct task_struct *task,
1021                                  struct task_struct *next, int cpu)
1022 {
1023         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1024         struct perf_counter_context *ctx = task->perf_counter_ctxp;
1025         struct perf_counter_context *next_ctx;
1026         struct perf_counter_context *parent;
1027         struct pt_regs *regs;
1028         int do_switch = 1;
1029
1030         regs = task_pt_regs(task);
1031         perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1032
1033         if (likely(!ctx || !cpuctx->task_ctx))
1034                 return;
1035
1036         update_context_time(ctx);
1037
1038         rcu_read_lock();
1039         parent = rcu_dereference(ctx->parent_ctx);
1040         next_ctx = next->perf_counter_ctxp;
1041         if (parent && next_ctx &&
1042             rcu_dereference(next_ctx->parent_ctx) == parent) {
1043                 /*
1044                  * Looks like the two contexts are clones, so we might be
1045                  * able to optimize the context switch.  We lock both
1046                  * contexts and check that they are clones under the
1047                  * lock (including re-checking that neither has been
1048                  * uncloned in the meantime).  It doesn't matter which
1049                  * order we take the locks because no other cpu could
1050                  * be trying to lock both of these tasks.
1051                  */
1052                 spin_lock(&ctx->lock);
1053                 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1054                 if (context_equiv(ctx, next_ctx)) {
1055                         /*
1056                          * XXX do we need a memory barrier of sorts
1057                          * wrt to rcu_dereference() of perf_counter_ctxp
1058                          */
1059                         task->perf_counter_ctxp = next_ctx;
1060                         next->perf_counter_ctxp = ctx;
1061                         ctx->task = next;
1062                         next_ctx->task = task;
1063                         do_switch = 0;
1064                 }
1065                 spin_unlock(&next_ctx->lock);
1066                 spin_unlock(&ctx->lock);
1067         }
1068         rcu_read_unlock();
1069
1070         if (do_switch) {
1071                 __perf_counter_sched_out(ctx, cpuctx);
1072                 cpuctx->task_ctx = NULL;
1073         }
1074 }
1075
1076 /*
1077  * Called with IRQs disabled
1078  */
1079 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1080 {
1081         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1082
1083         if (!cpuctx->task_ctx)
1084                 return;
1085
1086         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1087                 return;
1088
1089         __perf_counter_sched_out(ctx, cpuctx);
1090         cpuctx->task_ctx = NULL;
1091 }
1092
1093 /*
1094  * Called with IRQs disabled
1095  */
1096 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1097 {
1098         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1099 }
1100
1101 static void
1102 __perf_counter_sched_in(struct perf_counter_context *ctx,
1103                         struct perf_cpu_context *cpuctx, int cpu)
1104 {
1105         struct perf_counter *counter;
1106         int can_add_hw = 1;
1107
1108         spin_lock(&ctx->lock);
1109         ctx->is_active = 1;
1110         if (likely(!ctx->nr_counters))
1111                 goto out;
1112
1113         ctx->timestamp = perf_clock();
1114
1115         perf_disable();
1116
1117         /*
1118          * First go through the list and put on any pinned groups
1119          * in order to give them the best chance of going on.
1120          */
1121         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1122                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1123                     !counter->attr.pinned)
1124                         continue;
1125                 if (counter->cpu != -1 && counter->cpu != cpu)
1126                         continue;
1127
1128                 if (counter != counter->group_leader)
1129                         counter_sched_in(counter, cpuctx, ctx, cpu);
1130                 else {
1131                         if (group_can_go_on(counter, cpuctx, 1))
1132                                 group_sched_in(counter, cpuctx, ctx, cpu);
1133                 }
1134
1135                 /*
1136                  * If this pinned group hasn't been scheduled,
1137                  * put it in error state.
1138                  */
1139                 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1140                         update_group_times(counter);
1141                         counter->state = PERF_COUNTER_STATE_ERROR;
1142                 }
1143         }
1144
1145         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1146                 /*
1147                  * Ignore counters in OFF or ERROR state, and
1148                  * ignore pinned counters since we did them already.
1149                  */
1150                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1151                     counter->attr.pinned)
1152                         continue;
1153
1154                 /*
1155                  * Listen to the 'cpu' scheduling filter constraint
1156                  * of counters:
1157                  */
1158                 if (counter->cpu != -1 && counter->cpu != cpu)
1159                         continue;
1160
1161                 if (counter != counter->group_leader) {
1162                         if (counter_sched_in(counter, cpuctx, ctx, cpu))
1163                                 can_add_hw = 0;
1164                 } else {
1165                         if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1166                                 if (group_sched_in(counter, cpuctx, ctx, cpu))
1167                                         can_add_hw = 0;
1168                         }
1169                 }
1170         }
1171         perf_enable();
1172  out:
1173         spin_unlock(&ctx->lock);
1174 }
1175
1176 /*
1177  * Called from scheduler to add the counters of the current task
1178  * with interrupts disabled.
1179  *
1180  * We restore the counter value and then enable it.
1181  *
1182  * This does not protect us against NMI, but enable()
1183  * sets the enabled bit in the control field of counter _before_
1184  * accessing the counter control register. If a NMI hits, then it will
1185  * keep the counter running.
1186  */
1187 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1188 {
1189         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1190         struct perf_counter_context *ctx = task->perf_counter_ctxp;
1191
1192         if (likely(!ctx))
1193                 return;
1194         if (cpuctx->task_ctx == ctx)
1195                 return;
1196         __perf_counter_sched_in(ctx, cpuctx, cpu);
1197         cpuctx->task_ctx = ctx;
1198 }
1199
1200 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1201 {
1202         struct perf_counter_context *ctx = &cpuctx->ctx;
1203
1204         __perf_counter_sched_in(ctx, cpuctx, cpu);
1205 }
1206
1207 #define MAX_INTERRUPTS (~0ULL)
1208
1209 static void perf_log_throttle(struct perf_counter *counter, int enable);
1210 static void perf_log_period(struct perf_counter *counter, u64 period);
1211
1212 static void perf_adjust_period(struct perf_counter *counter, u64 events)
1213 {
1214         struct hw_perf_counter *hwc = &counter->hw;
1215         u64 period, sample_period;
1216         s64 delta;
1217
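        /*
         * 'events' is the estimated number of interrupts per second;
         * scale by the current period to get the event rate, then
         * derive the period that would yield attr.sample_freq
         * interrupts per second.
         */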
1218         events *= hwc->sample_period;
1219         period = div64_u64(events, counter->attr.sample_freq);
1220
1221         delta = (s64)(period - hwc->sample_period);
1222         delta = (delta + 7) / 8; /* low pass filter */
1223
1224         sample_period = hwc->sample_period + delta;
1225
1226         if (!sample_period)
1227                 sample_period = 1;
1228
1229         perf_log_period(counter, sample_period);
1230
1231         hwc->sample_period = sample_period;
1232 }
1233
1234 static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1235 {
1236         struct perf_counter *counter;
1237         struct hw_perf_counter *hwc;
1238         u64 interrupts, freq;
1239
1240         spin_lock(&ctx->lock);
1241         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1242                 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1243                         continue;
1244
1245                 hwc = &counter->hw;
1246
1247                 interrupts = hwc->interrupts;
1248                 hwc->interrupts = 0;
1249
1250                 /*
1251                  * unthrottle counters on the tick
1252                  */
1253                 if (interrupts == MAX_INTERRUPTS) {
1254                         perf_log_throttle(counter, 1);
1255                         counter->pmu->unthrottle(counter);
1256                         interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
1257                 }
1258
1259                 if (!counter->attr.freq || !counter->attr.sample_freq)
1260                         continue;
1261
1262                 /*
1263                  * if the specified freq < HZ then we need to skip ticks
1264                  */
1265                 if (counter->attr.sample_freq < HZ) {
1266                         freq = counter->attr.sample_freq;
1267
1268                         hwc->freq_count += freq;
1269                         hwc->freq_interrupts += interrupts;
1270
1271                         if (hwc->freq_count < HZ)
1272                                 continue;
1273
1274                         interrupts = hwc->freq_interrupts;
1275                         hwc->freq_interrupts = 0;
1276                         hwc->freq_count -= HZ;
1277                 } else
1278                         freq = HZ;
1279
1280                 perf_adjust_period(counter, freq * interrupts);
1281
1282                 /*
1283                  * In order to avoid being stalled by an (accidental) huge
1284                  * sample period, force reset the sample period if we didn't
1285                  * get any events in this freq period.
1286                  */
1287                 if (!interrupts) {
1288                         perf_disable();
1289                         counter->pmu->disable(counter);
1290                         atomic64_set(&hwc->period_left, 0);
1291                         counter->pmu->enable(counter);
1292                         perf_enable();
1293                 }
1294         }
1295         spin_unlock(&ctx->lock);
1296 }
1297
1298 /*
1299  * Round-robin a context's counters:
1300  */
1301 static void rotate_ctx(struct perf_counter_context *ctx)
1302 {
1303         struct perf_counter *counter;
1304
1305         if (!ctx->nr_counters)
1306                 return;
1307
1308         spin_lock(&ctx->lock);
1309         /*
1310          * Rotate the first entry last (works just fine for group counters too):
1311          */
1312         perf_disable();
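        /*
         * The loop body runs at most once: moving the current head of
         * counter_list to the tail is all the rotation we need.
         */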
1313         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1314                 list_move_tail(&counter->list_entry, &ctx->counter_list);
1315                 break;
1316         }
1317         perf_enable();
1318
1319         spin_unlock(&ctx->lock);
1320 }
1321
1322 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1323 {
1324         struct perf_cpu_context *cpuctx;
1325         struct perf_counter_context *ctx;
1326
1327         if (!atomic_read(&nr_counters))
1328                 return;
1329
1330         cpuctx = &per_cpu(perf_cpu_context, cpu);
1331         ctx = curr->perf_counter_ctxp;
1332
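        /*
         * Re-evaluate sampling periods, schedule everything out, rotate
         * the counter lists and schedule back in, so that counter groups
         * which did not fit on the PMU get their turn.
         */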
1333         perf_ctx_adjust_freq(&cpuctx->ctx);
1334         if (ctx)
1335                 perf_ctx_adjust_freq(ctx);
1336
1337         perf_counter_cpu_sched_out(cpuctx);
1338         if (ctx)
1339                 __perf_counter_task_sched_out(ctx);
1340
1341         rotate_ctx(&cpuctx->ctx);
1342         if (ctx)
1343                 rotate_ctx(ctx);
1344
1345         perf_counter_cpu_sched_in(cpuctx, cpu);
1346         if (ctx)
1347                 perf_counter_task_sched_in(curr, cpu);
1348 }
1349
1350 /*
1351  * Cross CPU call to read the hardware counter
1352  */
1353 static void __read(void *info)
1354 {
1355         struct perf_counter *counter = info;
1356         struct perf_counter_context *ctx = counter->ctx;
1357         unsigned long flags;
1358
1359         local_irq_save(flags);
1360         if (ctx->is_active)
1361                 update_context_time(ctx);
1362         counter->pmu->read(counter);
1363         update_counter_times(counter);
1364         local_irq_restore(flags);
1365 }
1366
1367 static u64 perf_counter_read(struct perf_counter *counter)
1368 {
1369         /*
1370          * If counter is enabled and currently active on a CPU, update the
1371          * value in the counter structure:
1372          */
1373         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1374                 smp_call_function_single(counter->oncpu,
1375                                          __read, counter, 1);
1376         } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1377                 update_counter_times(counter);
1378         }
1379
1380         return atomic64_read(&counter->count);
1381 }
1382
1383 /*
1384  * Initialize the perf_counter context in a task_struct:
1385  */
1386 static void
1387 __perf_counter_init_context(struct perf_counter_context *ctx,
1388                             struct task_struct *task)
1389 {
1390         memset(ctx, 0, sizeof(*ctx));
1391         spin_lock_init(&ctx->lock);
1392         mutex_init(&ctx->mutex);
1393         INIT_LIST_HEAD(&ctx->counter_list);
1394         INIT_LIST_HEAD(&ctx->event_list);
1395         atomic_set(&ctx->refcount, 1);
1396         ctx->task = task;
1397 }
1398
1399 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1400 {
1401         struct perf_counter_context *parent_ctx;
1402         struct perf_counter_context *ctx;
1403         struct perf_cpu_context *cpuctx;
1404         struct task_struct *task;
1405         unsigned long flags;
1406         int err;
1407
1408         /*
1409          * If cpu is not a wildcard then this is a percpu counter:
1410          */
1411         if (cpu != -1) {
1412                 /* Must be root to operate on a CPU counter: */
1413                 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1414                         return ERR_PTR(-EACCES);
1415
1416                 if (cpu < 0 || cpu > num_possible_cpus())
1417                         return ERR_PTR(-EINVAL);
1418
1419                 /*
1420                  * We could be clever and allow to attach a counter to an
1421                  * offline CPU and activate it when the CPU comes up, but
1422                  * that's for later.
1423                  */
1424                 if (!cpu_isset(cpu, cpu_online_map))
1425                         return ERR_PTR(-ENODEV);
1426
1427                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1428                 ctx = &cpuctx->ctx;
1429                 get_ctx(ctx);
1430
1431                 return ctx;
1432         }
1433
1434         rcu_read_lock();
1435         if (!pid)
1436                 task = current;
1437         else
1438                 task = find_task_by_vpid(pid);
1439         if (task)
1440                 get_task_struct(task);
1441         rcu_read_unlock();
1442
1443         if (!task)
1444                 return ERR_PTR(-ESRCH);
1445
1446         /*
1447          * Can't attach counters to a dying task.
1448          */
1449         err = -ESRCH;
1450         if (task->flags & PF_EXITING)
1451                 goto errout;
1452
1453         /* Reuse ptrace permission checks for now. */
1454         err = -EACCES;
1455         if (!ptrace_may_access(task, PTRACE_MODE_READ))
1456                 goto errout;
1457
1458  retry:
1459         ctx = perf_lock_task_context(task, &flags);
1460         if (ctx) {
1461                 parent_ctx = ctx->parent_ctx;
1462                 if (parent_ctx) {
1463                         put_ctx(parent_ctx);
1464                         ctx->parent_ctx = NULL;         /* no longer a clone */
1465                 }
1466                 spin_unlock_irqrestore(&ctx->lock, flags);
1467         }
1468
1469         if (!ctx) {
1470                 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1471                 err = -ENOMEM;
1472                 if (!ctx)
1473                         goto errout;
1474                 __perf_counter_init_context(ctx, task);
1475                 get_ctx(ctx);
1476                 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1477                         /*
1478                          * We raced with some other task; use
1479                          * the context they set.
1480                          */
1481                         kfree(ctx);
1482                         goto retry;
1483                 }
1484                 get_task_struct(task);
1485         }
1486
1487         put_task_struct(task);
1488         return ctx;
1489
1490  errout:
1491         put_task_struct(task);
1492         return ERR_PTR(err);
1493 }
1494
1495 static void free_counter_rcu(struct rcu_head *head)
1496 {
1497         struct perf_counter *counter;
1498
1499         counter = container_of(head, struct perf_counter, rcu_head);
1500         if (counter->ns)
1501                 put_pid_ns(counter->ns);
1502         kfree(counter);
1503 }
1504
1505 static void perf_pending_sync(struct perf_counter *counter);
1506
1507 static void free_counter(struct perf_counter *counter)
1508 {
1509         perf_pending_sync(counter);
1510
1511         atomic_dec(&nr_counters);
1512         if (counter->attr.mmap)
1513                 atomic_dec(&nr_mmap_counters);
1514         if (counter->attr.comm)
1515                 atomic_dec(&nr_comm_counters);
1516
1517         if (counter->destroy)
1518                 counter->destroy(counter);
1519
1520         put_ctx(counter->ctx);
1521         call_rcu(&counter->rcu_head, free_counter_rcu);
1522 }
1523
1524 /*
1525  * Called when the last reference to the file is gone.
1526  */
1527 static int perf_release(struct inode *inode, struct file *file)
1528 {
1529         struct perf_counter *counter = file->private_data;
1530         struct perf_counter_context *ctx = counter->ctx;
1531
1532         file->private_data = NULL;
1533
1534         WARN_ON_ONCE(ctx->parent_ctx);
1535         mutex_lock(&ctx->mutex);
1536         perf_counter_remove_from_context(counter);
1537         mutex_unlock(&ctx->mutex);
1538
1539         mutex_lock(&counter->owner->perf_counter_mutex);
1540         list_del_init(&counter->owner_entry);
1541         mutex_unlock(&counter->owner->perf_counter_mutex);
1542         put_task_struct(counter->owner);
1543
1544         free_counter(counter);
1545
1546         return 0;
1547 }
1548
1549 /*
1550  * Read the performance counter - simple non blocking version for now
1551  */
1552 static ssize_t
1553 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1554 {
1555         u64 values[4];
1556         int n;
1557
1558         /*
1559          * Return end-of-file for a read on a counter that is in
1560          * error state (i.e. because it was pinned but it couldn't be
1561          * scheduled on to the CPU at some point).
1562          */
1563         if (counter->state == PERF_COUNTER_STATE_ERROR)
1564                 return 0;
1565
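        /*
         * Fill 'values' with the counter value first, followed by the
         * optional enabled/running times and the counter id, as selected
         * by attr.read_format.
         */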
1566         WARN_ON_ONCE(counter->ctx->parent_ctx);
1567         mutex_lock(&counter->child_mutex);
1568         values[0] = perf_counter_read(counter);
1569         n = 1;
1570         if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1571                 values[n++] = counter->total_time_enabled +
1572                         atomic64_read(&counter->child_total_time_enabled);
1573         if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1574                 values[n++] = counter->total_time_running +
1575                         atomic64_read(&counter->child_total_time_running);
1576         if (counter->attr.read_format & PERF_FORMAT_ID)
1577                 values[n++] = counter->id;
1578         mutex_unlock(&counter->child_mutex);
1579
1580         if (count < n * sizeof(u64))
1581                 return -EINVAL;
1582         count = n * sizeof(u64);
1583
1584         if (copy_to_user(buf, values, count))
1585                 return -EFAULT;
1586
1587         return count;
1588 }
1589
1590 static ssize_t
1591 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1592 {
1593         struct perf_counter *counter = file->private_data;
1594
1595         return perf_read_hw(counter, buf, count);
1596 }
1597
1598 static unsigned int perf_poll(struct file *file, poll_table *wait)
1599 {
1600         struct perf_counter *counter = file->private_data;
1601         struct perf_mmap_data *data;
1602         unsigned int events = POLLHUP;
1603
1604         rcu_read_lock();
1605         data = rcu_dereference(counter->data);
1606         if (data)
1607                 events = atomic_xchg(&data->poll, 0);
1608         rcu_read_unlock();
1609
1610         poll_wait(file, &counter->waitq, wait);
1611
1612         return events;
1613 }
1614
1615 static void perf_counter_reset(struct perf_counter *counter)
1616 {
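        /* fold the latest hardware value into ->count before clearing it */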
1617         (void)perf_counter_read(counter);
1618         atomic64_set(&counter->count, 0);
1619         perf_counter_update_userpage(counter);
1620 }
1621
1622 /*
1623  * Holding the top-level counter's child_mutex means that any
1624  * descendant process that has inherited this counter will block
1625  * in sync_child_counter if it goes to exit, thus satisfying the
1626  * task existence requirements of perf_counter_enable/disable.
1627  */
1628 static void perf_counter_for_each_child(struct perf_counter *counter,
1629                                         void (*func)(struct perf_counter *))
1630 {
1631         struct perf_counter *child;
1632
1633         WARN_ON_ONCE(counter->ctx->parent_ctx);
1634         mutex_lock(&counter->child_mutex);
1635         func(counter);
1636         list_for_each_entry(child, &counter->child_list, child_list)
1637                 func(child);
1638         mutex_unlock(&counter->child_mutex);
1639 }
1640
1641 static void perf_counter_for_each(struct perf_counter *counter,
1642                                   void (*func)(struct perf_counter *))
1643 {
1644         struct perf_counter_context *ctx = counter->ctx;
1645         struct perf_counter *sibling;
1646
1647         WARN_ON_ONCE(ctx->parent_ctx);
1648         mutex_lock(&ctx->mutex);
1649         counter = counter->group_leader;
1650
1651         perf_counter_for_each_child(counter, func);
1652
1653         list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1654                 perf_counter_for_each_child(sibling, func);
1655         mutex_unlock(&ctx->mutex);
1656 }
1657
1658 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1659 {
1660         struct perf_counter_context *ctx = counter->ctx;
1661         unsigned long size;
1662         int ret = 0;
1663         u64 value;
1664
1665         if (!counter->attr.sample_period)
1666                 return -EINVAL;
1667
1668         size = copy_from_user(&value, arg, sizeof(value));
1669         if (size)
1670                 return -EFAULT;
1671
1672         if (!value)
1673                 return -EINVAL;
1674
1675         spin_lock_irq(&ctx->lock);
1676         if (counter->attr.freq) {
1677                 if (value > sysctl_perf_counter_sample_rate) {
1678                         ret = -EINVAL;
1679                         goto unlock;
1680                 }
1681
1682                 counter->attr.sample_freq = value;
1683         } else {
1684                 perf_log_period(counter, value);
1685
1686                 counter->attr.sample_period = value;
1687                 counter->hw.sample_period = value;
1688         }
1689 unlock:
1690         spin_unlock_irq(&ctx->lock);
1691
1692         return ret;
1693 }
1694
1695 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1696 {
1697         struct perf_counter *counter = file->private_data;
1698         void (*func)(struct perf_counter *);
1699         u32 flags = arg;
1700
1701         switch (cmd) {
1702         case PERF_COUNTER_IOC_ENABLE:
1703                 func = perf_counter_enable;
1704                 break;
1705         case PERF_COUNTER_IOC_DISABLE:
1706                 func = perf_counter_disable;
1707                 break;
1708         case PERF_COUNTER_IOC_RESET:
1709                 func = perf_counter_reset;
1710                 break;
1711
1712         case PERF_COUNTER_IOC_REFRESH:
1713                 return perf_counter_refresh(counter, arg);
1714
1715         case PERF_COUNTER_IOC_PERIOD:
1716                 return perf_counter_period(counter, (u64 __user *)arg);
1717
1718         default:
1719                 return -ENOTTY;
1720         }
1721
1722         if (flags & PERF_IOC_FLAG_GROUP)
1723                 perf_counter_for_each(counter, func);
1724         else
1725                 perf_counter_for_each_child(counter, func);
1726
1727         return 0;
1728 }
1729
1730 int perf_counter_task_enable(void)
1731 {
1732         struct perf_counter *counter;
1733
1734         mutex_lock(&current->perf_counter_mutex);
1735         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1736                 perf_counter_for_each_child(counter, perf_counter_enable);
1737         mutex_unlock(&current->perf_counter_mutex);
1738
1739         return 0;
1740 }
1741
1742 int perf_counter_task_disable(void)
1743 {
1744         struct perf_counter *counter;
1745
1746         mutex_lock(&current->perf_counter_mutex);
1747         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1748                 perf_counter_for_each_child(counter, perf_counter_disable);
1749         mutex_unlock(&current->perf_counter_mutex);
1750
1751         return 0;
1752 }
1753
1754 /*
1755  * Callers need to ensure there can be no nesting of this function, otherwise
1756  * the seqlock logic goes bad. We can not serialize this because the arch
1757  * code calls this from NMI context.
1758  */
1759 void perf_counter_update_userpage(struct perf_counter *counter)
1760 {
1761         struct perf_counter_mmap_page *userpg;
1762         struct perf_mmap_data *data;
1763
1764         rcu_read_lock();
1765         data = rcu_dereference(counter->data);
1766         if (!data)
1767                 goto unlock;
1768
1769         userpg = data->user_page;
1770
1771         /*
1772          * Disable preemption so as to not let the corresponding user-space
1773          * spin too long if we get preempted.
1774          */
1775         preempt_disable();
1776         ++userpg->lock;
1777         barrier();
1778         userpg->index = counter->hw.idx;
1779         userpg->offset = atomic64_read(&counter->count);
1780         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1781                 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1782
1783         barrier();
1784         ++userpg->lock;
1785         preempt_enable();
1786 unlock:
1787         rcu_read_unlock();
1788 }
1789
1790 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1791 {
1792         struct perf_counter *counter = vma->vm_file->private_data;
1793         struct perf_mmap_data *data;
1794         int ret = VM_FAULT_SIGBUS;
1795
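        /*
         * Page 0 is the user control page, pages 1..nr_pages are the data
         * buffer; only the control page may be made writable here.
         */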
1796         if (vmf->flags & FAULT_FLAG_MKWRITE) {
1797                 if (vmf->pgoff == 0)
1798                         ret = 0;
1799                 return ret;
1800         }
1801
1802         rcu_read_lock();
1803         data = rcu_dereference(counter->data);
1804         if (!data)
1805                 goto unlock;
1806
1807         if (vmf->pgoff == 0) {
1808                 vmf->page = virt_to_page(data->user_page);
1809         } else {
1810                 int nr = vmf->pgoff - 1;
1811
1812                 if ((unsigned)nr >= data->nr_pages)
1813                         goto unlock;
1814
1815                 if (vmf->flags & FAULT_FLAG_WRITE)
1816                         goto unlock;
1817
1818                 vmf->page = virt_to_page(data->data_pages[nr]);
1819         }
1820
1821         get_page(vmf->page);
1822         vmf->page->mapping = vma->vm_file->f_mapping;
1823         vmf->page->index   = vmf->pgoff;
1824
1825         ret = 0;
1826 unlock:
1827         rcu_read_unlock();
1828
1829         return ret;
1830 }
1831
1832 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1833 {
1834         struct perf_mmap_data *data;
1835         unsigned long size;
1836         int i;
1837
1838         WARN_ON(atomic_read(&counter->mmap_count));
1839
1840         size = sizeof(struct perf_mmap_data);
1841         size += nr_pages * sizeof(void *);
1842
1843         data = kzalloc(size, GFP_KERNEL);
1844         if (!data)
1845                 goto fail;
1846
1847         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1848         if (!data->user_page)
1849                 goto fail_user_page;
1850
1851         for (i = 0; i < nr_pages; i++) {
1852                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1853                 if (!data->data_pages[i])
1854                         goto fail_data_pages;
1855         }
1856
1857         data->nr_pages = nr_pages;
1858         atomic_set(&data->lock, -1);
1859
1860         rcu_assign_pointer(counter->data, data);
1861
1862         return 0;
1863
1864 fail_data_pages:
1865         for (i--; i >= 0; i--)
1866                 free_page((unsigned long)data->data_pages[i]);
1867
1868         free_page((unsigned long)data->user_page);
1869
1870 fail_user_page:
1871         kfree(data);
1872
1873 fail:
1874         return -ENOMEM;
1875 }
1876
1877 static void perf_mmap_free_page(unsigned long addr)
1878 {
1879         struct page *page = virt_to_page(addr);
1880
1881         page->mapping = NULL;
1882         __free_page(page);
1883 }
1884
1885 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1886 {
1887         struct perf_mmap_data *data;
1888         int i;
1889
1890         data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
1891
1892         perf_mmap_free_page((unsigned long)data->user_page);
1893         for (i = 0; i < data->nr_pages; i++)
1894                 perf_mmap_free_page((unsigned long)data->data_pages[i]);
1895
1896         kfree(data);
1897 }
1898
1899 static void perf_mmap_data_free(struct perf_counter *counter)
1900 {
1901         struct perf_mmap_data *data = counter->data;
1902
1903         WARN_ON(atomic_read(&counter->mmap_count));
1904
1905         rcu_assign_pointer(counter->data, NULL);
1906         call_rcu(&data->rcu_head, __perf_mmap_data_free);
1907 }
1908
1909 static void perf_mmap_open(struct vm_area_struct *vma)
1910 {
1911         struct perf_counter *counter = vma->vm_file->private_data;
1912
1913         atomic_inc(&counter->mmap_count);
1914 }
1915
1916 static void perf_mmap_close(struct vm_area_struct *vma)
1917 {
1918         struct perf_counter *counter = vma->vm_file->private_data;
1919
1920         WARN_ON_ONCE(counter->ctx->parent_ctx);
1921         if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
1922                 struct user_struct *user = current_user();
1923
1924                 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1925                 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1926                 perf_mmap_data_free(counter);
1927                 mutex_unlock(&counter->mmap_mutex);
1928         }
1929 }
1930
1931 static struct vm_operations_struct perf_mmap_vmops = {
1932         .open           = perf_mmap_open,
1933         .close          = perf_mmap_close,
1934         .fault          = perf_mmap_fault,
1935         .page_mkwrite   = perf_mmap_fault,
1936 };
1937
1938 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1939 {
1940         struct perf_counter *counter = file->private_data;
1941         unsigned long user_locked, user_lock_limit;
1942         struct user_struct *user = current_user();
1943         unsigned long locked, lock_limit;
1944         unsigned long vma_size;
1945         unsigned long nr_pages;
1946         long user_extra, extra;
1947         int ret = 0;
1948
1949         if (!(vma->vm_flags & VM_SHARED))
1950                 return -EINVAL;
1951
1952         vma_size = vma->vm_end - vma->vm_start;
1953         nr_pages = (vma_size / PAGE_SIZE) - 1;
1954
1955         /*
1956          * If we have data pages ensure they're a power-of-two number, so we
1957          * can do bitmasks instead of modulo.
1958          */
1959         if (nr_pages != 0 && !is_power_of_2(nr_pages))
1960                 return -EINVAL;
1961
1962         if (vma_size != PAGE_SIZE * (1 + nr_pages))
1963                 return -EINVAL;
1964
1965         if (vma->vm_pgoff != 0)
1966                 return -EINVAL;
1967
1968         WARN_ON_ONCE(counter->ctx->parent_ctx);
1969         mutex_lock(&counter->mmap_mutex);
1970         if (atomic_inc_not_zero(&counter->mmap_count)) {
1971                 if (nr_pages != counter->data->nr_pages)
1972                         ret = -EINVAL;
1973                 goto unlock;
1974         }
1975
1976         user_extra = nr_pages + 1;
1977         user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1978
1979         /*
1980          * Increase the limit linearly with more CPUs:
1981          */
1982         user_lock_limit *= num_online_cpus();
1983
1984         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
1985
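        /*
         * Pages within the per-user mlock allowance are charged to the
         * user; anything beyond that ('extra') is charged against the
         * task's RLIMIT_MEMLOCK via mm->locked_vm.
         */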
1986         extra = 0;
1987         if (user_locked > user_lock_limit)
1988                 extra = user_locked - user_lock_limit;
1989
1990         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1991         lock_limit >>= PAGE_SHIFT;
1992         locked = vma->vm_mm->locked_vm + extra;
1993
1994         if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1995                 ret = -EPERM;
1996                 goto unlock;
1997         }
1998
1999         WARN_ON(counter->data);
2000         ret = perf_mmap_data_alloc(counter, nr_pages);
2001         if (ret)
2002                 goto unlock;
2003
2004         atomic_set(&counter->mmap_count, 1);
2005         atomic_long_add(user_extra, &user->locked_vm);
2006         vma->vm_mm->locked_vm += extra;
2007         counter->data->nr_locked = extra;
2008         if (vma->vm_flags & VM_WRITE)
2009                 counter->data->writable = 1;
2010
2011 unlock:
2012         mutex_unlock(&counter->mmap_mutex);
2013
2014         vma->vm_flags |= VM_RESERVED;
2015         vma->vm_ops = &perf_mmap_vmops;
2016
2017         return ret;
2018 }
2019
2020 static int perf_fasync(int fd, struct file *filp, int on)
2021 {
2022         struct inode *inode = filp->f_path.dentry->d_inode;
2023         struct perf_counter *counter = filp->private_data;
2024         int retval;
2025
2026         mutex_lock(&inode->i_mutex);
2027         retval = fasync_helper(fd, filp, on, &counter->fasync);
2028         mutex_unlock(&inode->i_mutex);
2029
2030         if (retval < 0)
2031                 return retval;
2032
2033         return 0;
2034 }
2035
2036 static const struct file_operations perf_fops = {
2037         .release                = perf_release,
2038         .read                   = perf_read,
2039         .poll                   = perf_poll,
2040         .unlocked_ioctl         = perf_ioctl,
2041         .compat_ioctl           = perf_ioctl,
2042         .mmap                   = perf_mmap,
2043         .fasync                 = perf_fasync,
2044 };
2045
2046 /*
2047  * Perf counter wakeup
2048  *
2049  * If there's data, ensure we set the poll() state and publish everything
2050  * to user-space before waking everybody up.
2051  */
2052
2053 void perf_counter_wakeup(struct perf_counter *counter)
2054 {
2055         wake_up_all(&counter->waitq);
2056
2057         if (counter->pending_kill) {
2058                 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
2059                 counter->pending_kill = 0;
2060         }
2061 }
2062
2063 /*
2064  * Pending wakeups
2065  *
2066  * Handle the case where we need to wake up from NMI (or rq->lock) context.
2067  *
2068  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2069  * single linked list and use cmpxchg() to add entries lockless.
2070  */
2071
2072 static void perf_pending_counter(struct perf_pending_entry *entry)
2073 {
2074         struct perf_counter *counter = container_of(entry,
2075                         struct perf_counter, pending);
2076
2077         if (counter->pending_disable) {
2078                 counter->pending_disable = 0;
2079                 perf_counter_disable(counter);
2080         }
2081
2082         if (counter->pending_wakeup) {
2083                 counter->pending_wakeup = 0;
2084                 perf_counter_wakeup(counter);
2085         }
2086 }
2087
2088 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2089
2090 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2091         PENDING_TAIL,
2092 };
2093
2094 static void perf_pending_queue(struct perf_pending_entry *entry,
2095                                void (*func)(struct perf_pending_entry *))
2096 {
2097         struct perf_pending_entry **head;
2098
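        /*
         * ->next == NULL means "not queued"; claiming it with PENDING_TAIL
         * ensures an entry is enqueued at most once, even if this races
         * with an NMI touching the same entry.
         */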
2099         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2100                 return;
2101
2102         entry->func = func;
2103
2104         head = &get_cpu_var(perf_pending_head);
2105
2106         do {
2107                 entry->next = *head;
2108         } while (cmpxchg(head, entry->next, entry) != entry->next);
2109
2110         set_perf_counter_pending();
2111
2112         put_cpu_var(perf_pending_head);
2113 }
2114
2115 static int __perf_pending_run(void)
2116 {
2117         struct perf_pending_entry *list;
2118         int nr = 0;
2119
2120         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2121         while (list != PENDING_TAIL) {
2122                 void (*func)(struct perf_pending_entry *);
2123                 struct perf_pending_entry *entry = list;
2124
2125                 list = list->next;
2126
2127                 func = entry->func;
2128                 entry->next = NULL;
2129                 /*
2130                  * Ensure we observe the unqueue before we issue the wakeup,
2131                  * so that we won't be waiting forever.
2132                  * -- see perf_not_pending().
2133                  */
2134                 smp_wmb();
2135
2136                 func(entry);
2137                 nr++;
2138         }
2139
2140         return nr;
2141 }
2142
2143 static inline int perf_not_pending(struct perf_counter *counter)
2144 {
2145         /*
2146          * If we flush on whatever cpu we run, there is a chance we don't
2147          * need to wait.
2148          */
2149         get_cpu();
2150         __perf_pending_run();
2151         put_cpu();
2152
2153         /*
2154          * Ensure we see the proper queue state before going to sleep
2155          * so that we do not miss the wakeup. -- see __perf_pending_run()
2156          */
2157         smp_rmb();
2158         return counter->pending.next == NULL;
2159 }
2160
2161 static void perf_pending_sync(struct perf_counter *counter)
2162 {
2163         wait_event(counter->waitq, perf_not_pending(counter));
2164 }
2165
2166 void perf_counter_do_pending(void)
2167 {
2168         __perf_pending_run();
2169 }
2170
2171 /*
2172  * Callchain support -- arch specific
2173  */
2174
2175 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2176 {
2177         return NULL;
2178 }
2179
2180 /*
2181  * Output
2182  */
2183
2184 struct perf_output_handle {
2185         struct perf_counter     *counter;
2186         struct perf_mmap_data   *data;
2187         unsigned long           head;
2188         unsigned long           offset;
2189         int                     nmi;
2190         int                     sample;
2191         int                     locked;
2192         unsigned long           flags;
2193 };
2194
2195 static bool perf_output_space(struct perf_mmap_data *data,
2196                               unsigned int offset, unsigned int head)
2197 {
2198         unsigned long tail;
2199         unsigned long mask;
2200
2201         if (!data->writable)
2202                 return true;
2203
2204         mask = (data->nr_pages << PAGE_SHIFT) - 1;
2205         /*
2206          * Userspace could choose to issue an mb() before updating the tail
2207          * pointer, so that all reads are completed before the write is
2208          * issued.
2209          */
2210         tail = ACCESS_ONCE(data->user_page->data_tail);
2211         smp_rmb();
2212
2213         offset = (offset - tail) & mask;
2214         head   = (head   - tail) & mask;
2215
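        /*
         * offset and head are now distances from the tail; if the new head
         * wraps back behind the old offset, the write would overrun data
         * that user-space has not read yet.
         */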
2216         if ((int)(head - offset) < 0)
2217                 return false;
2218
2219         return true;
2220 }
2221
2222 static void perf_output_wakeup(struct perf_output_handle *handle)
2223 {
2224         atomic_set(&handle->data->poll, POLL_IN);
2225
2226         if (handle->nmi) {
2227                 handle->counter->pending_wakeup = 1;
2228                 perf_pending_queue(&handle->counter->pending,
2229                                    perf_pending_counter);
2230         } else
2231                 perf_counter_wakeup(handle->counter);
2232 }
2233
2234 /*
2235  * Curious locking construct.
2236  *
2237  * We need to ensure a later event doesn't publish a head when a former
2238  * event isn't done writing. However since we need to deal with NMIs we
2239  * cannot fully serialize things.
2240  *
2241  * What we do is serialize between CPUs so we only have to deal with NMI
2242  * nesting on a single CPU.
2243  *
2244  * We only publish the head (and generate a wakeup) when the outer-most
2245  * event completes.
2246  */
2247 static void perf_output_lock(struct perf_output_handle *handle)
2248 {
2249         struct perf_mmap_data *data = handle->data;
2250         int cpu;
2251
2252         handle->locked = 0;
2253
2254         local_irq_save(handle->flags);
2255         cpu = smp_processor_id();
2256
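        /*
         * If an NMI hits while this CPU already owns data->lock, treat it
         * as a recursive acquisition: leave handle->locked clear so only
         * the outer-most writer publishes the head on unlock.
         */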
2257         if (in_nmi() && atomic_read(&data->lock) == cpu)
2258                 return;
2259
2260         while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2261                 cpu_relax();
2262
2263         handle->locked = 1;
2264 }
2265
2266 static void perf_output_unlock(struct perf_output_handle *handle)
2267 {
2268         struct perf_mmap_data *data = handle->data;
2269         unsigned long head;
2270         int cpu;
2271
2272         data->done_head = data->head;
2273
2274         if (!handle->locked)
2275                 goto out;
2276
2277 again:
2278         /*
2279          * The xchg implies a full barrier that ensures all writes are done
2280          * before we publish the new head, matched by a rmb() in userspace when
2281          * reading this position.
2282          */
2283         while ((head = atomic_long_xchg(&data->done_head, 0)))
2284                 data->user_page->data_head = head;
2285
2286         /*
2287          * NMI can happen here, which means we can miss a done_head update.
2288          */
2289
2290         cpu = atomic_xchg(&data->lock, -1);
2291         WARN_ON_ONCE(cpu != smp_processor_id());
2292
2293         /*
2294          * Therefore we have to check that we did not miss an update.
2295          */
2296         if (unlikely(atomic_long_read(&data->done_head))) {
2297                 /*
2298                  * Since we had it locked, we can lock it again.
2299                  */
2300                 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2301                         cpu_relax();
2302
2303                 goto again;
2304         }
2305
2306         if (atomic_xchg(&data->wakeup, 0))
2307                 perf_output_wakeup(handle);
2308 out:
2309         local_irq_restore(handle->flags);
2310 }
2311
2312 static void perf_output_copy(struct perf_output_handle *handle,
2313                              const void *buf, unsigned int len)
2314 {
2315         unsigned int pages_mask;
2316         unsigned int offset;
2317         unsigned int size;
2318         void **pages;
2319
2320         offset          = handle->offset;
2321         pages_mask      = handle->data->nr_pages - 1;
2322         pages           = handle->data->data_pages;
2323
2324         do {
2325                 unsigned int page_offset;
2326                 int nr;
2327
2328                 nr          = (offset >> PAGE_SHIFT) & pages_mask;
2329                 page_offset = offset & (PAGE_SIZE - 1);
2330                 size        = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2331
2332                 memcpy(pages[nr] + page_offset, buf, size);
2333
2334                 len         -= size;
2335                 buf         += size;
2336                 offset      += size;
2337         } while (len);
2338
2339         handle->offset = offset;
2340
2341         /*
2342          * Check we didn't copy past our reservation window, taking the
2343          * possible unsigned int wrap into account.
2344          */
2345         WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2346 }
2347
2348 #define perf_output_put(handle, x) \
2349         perf_output_copy((handle), &(x), sizeof(x))
2350
2351 static int perf_output_begin(struct perf_output_handle *handle,
2352                              struct perf_counter *counter, unsigned int size,
2353                              int nmi, int sample)
2354 {
2355         struct perf_mmap_data *data;
2356         unsigned int offset, head;
2357         int have_lost;
2358         struct {
2359                 struct perf_event_header header;
2360                 u64                      id;
2361                 u64                      lost;
2362         } lost_event;
2363
2364         /*
2365          * For inherited counters we send all the output towards the parent.
2366          */
2367         if (counter->parent)
2368                 counter = counter->parent;
2369
2370         rcu_read_lock();
2371         data = rcu_dereference(counter->data);
2372         if (!data)
2373                 goto out;
2374
2375         handle->data    = data;
2376         handle->counter = counter;
2377         handle->nmi     = nmi;
2378         handle->sample  = sample;
2379
2380         if (!data->nr_pages)
2381                 goto fail;
2382
2383         have_lost = atomic_read(&data->lost);
2384         if (have_lost)
2385                 size += sizeof(lost_event);
2386
2387         perf_output_lock(handle);
2388
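        /*
         * Reserve 'size' bytes by advancing data->head with a cmpxchg
         * loop; concurrent (and nested NMI) writers each obtain a
         * disjoint [offset, head) range.
         */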
2389         do {
2390                 offset = head = atomic_long_read(&data->head);
2391                 head += size;
2392                 if (unlikely(!perf_output_space(data, offset, head)))
2393                         goto fail;
2394         } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2395
2396         handle->offset  = offset;
2397         handle->head    = head;
2398
2399         if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2400                 atomic_set(&data->wakeup, 1);
2401
2402         if (have_lost) {
2403                 lost_event.header.type = PERF_EVENT_LOST;
2404                 lost_event.header.misc = 0;
2405                 lost_event.header.size = sizeof(lost_event);
2406                 lost_event.id          = counter->id;
2407                 lost_event.lost        = atomic_xchg(&data->lost, 0);
2408
2409                 perf_output_put(handle, lost_event);
2410         }
2411
2412         return 0;
2413
2414 fail:
2415         atomic_inc(&data->lost);
2416         perf_output_unlock(handle);
2417 out:
2418         rcu_read_unlock();
2419
2420         return -ENOSPC;
2421 }
2422
2423 static void perf_output_end(struct perf_output_handle *handle)
2424 {
2425         struct perf_counter *counter = handle->counter;
2426         struct perf_mmap_data *data = handle->data;
2427
2428         int wakeup_events = counter->attr.wakeup_events;
2429
2430         if (handle->sample && wakeup_events) {
2431                 int events = atomic_inc_return(&data->events);
2432                 if (events >= wakeup_events) {
2433                         atomic_sub(wakeup_events, &data->events);
2434                         atomic_set(&data->wakeup, 1);
2435                 }
2436         }
2437
2438         perf_output_unlock(handle);
2439         rcu_read_unlock();
2440 }
2441
2442 static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2443 {
2444         /*
2445          * only top level counters have the pid namespace they were created in
2446          */
2447         if (counter->parent)
2448                 counter = counter->parent;
2449
2450         return task_tgid_nr_ns(p, counter->ns);
2451 }
2452
2453 static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2454 {
2455         /*
2456          * only top level counters have the pid namespace they were created in
2457          */
2458         if (counter->parent)
2459                 counter = counter->parent;
2460
2461         return task_pid_nr_ns(p, counter->ns);
2462 }
2463
2464 static void perf_counter_output(struct perf_counter *counter, int nmi,
2465                                 struct perf_sample_data *data)
2466 {
2467         int ret;
2468         u64 sample_type = counter->attr.sample_type;
2469         struct perf_output_handle handle;
2470         struct perf_event_header header;
2471         u64 ip;
2472         struct {
2473                 u32 pid, tid;
2474         } tid_entry;
2475         struct {
2476                 u64 id;
2477                 u64 counter;
2478         } group_entry;
2479         struct perf_callchain_entry *callchain = NULL;
2480         int callchain_size = 0;
2481         u64 time;
2482         struct {
2483                 u32 cpu, reserved;
2484         } cpu_entry;
2485
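        /*
         * First pass: compute header.size from sample_type. The output
         * below must emit the fields in exactly the same order.
         */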
2486         header.type = 0;
2487         header.size = sizeof(header);
2488
2489         header.misc = PERF_EVENT_MISC_OVERFLOW;
2490         header.misc |= perf_misc_flags(data->regs);
2491
2492         if (sample_type & PERF_SAMPLE_IP) {
2493                 ip = perf_instruction_pointer(data->regs);
2494                 header.type |= PERF_SAMPLE_IP;
2495                 header.size += sizeof(ip);
2496         }
2497
2498         if (sample_type & PERF_SAMPLE_TID) {
2499                 /* namespace issues */
2500                 tid_entry.pid = perf_counter_pid(counter, current);
2501                 tid_entry.tid = perf_counter_tid(counter, current);
2502
2503                 header.type |= PERF_SAMPLE_TID;
2504                 header.size += sizeof(tid_entry);
2505         }
2506
2507         if (sample_type & PERF_SAMPLE_TIME) {
2508                 /*
2509                  * Maybe do better on x86 and provide cpu_clock_nmi()
2510                  */
2511                 time = sched_clock();
2512
2513                 header.type |= PERF_SAMPLE_TIME;
2514                 header.size += sizeof(u64);
2515         }
2516
2517         if (sample_type & PERF_SAMPLE_ADDR) {
2518                 header.type |= PERF_SAMPLE_ADDR;
2519                 header.size += sizeof(u64);
2520         }
2521
2522         if (sample_type & PERF_SAMPLE_ID) {
2523                 header.type |= PERF_SAMPLE_ID;
2524                 header.size += sizeof(u64);
2525         }
2526
2527         if (sample_type & PERF_SAMPLE_CPU) {
2528                 header.type |= PERF_SAMPLE_CPU;
2529                 header.size += sizeof(cpu_entry);
2530
2531                 cpu_entry.cpu = raw_smp_processor_id();
2532         }
2533
2534         if (sample_type & PERF_SAMPLE_PERIOD) {
2535                 header.type |= PERF_SAMPLE_PERIOD;
2536                 header.size += sizeof(u64);
2537         }
2538
2539         if (sample_type & PERF_SAMPLE_GROUP) {
2540                 header.type |= PERF_SAMPLE_GROUP;
2541                 header.size += sizeof(u64) +
2542                         counter->nr_siblings * sizeof(group_entry);
2543         }
2544
2545         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2546                 callchain = perf_callchain(data->regs);
2547
2548                 if (callchain) {
2549                         callchain_size = (1 + callchain->nr) * sizeof(u64);
2550
2551                         header.type |= PERF_SAMPLE_CALLCHAIN;
2552                         header.size += callchain_size;
2553                 }
2554         }
2555
2556         ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2557         if (ret)
2558                 return;
2559
2560         perf_output_put(&handle, header);
2561
2562         if (sample_type & PERF_SAMPLE_IP)
2563                 perf_output_put(&handle, ip);
2564
2565         if (sample_type & PERF_SAMPLE_TID)
2566                 perf_output_put(&handle, tid_entry);
2567
2568         if (sample_type & PERF_SAMPLE_TIME)
2569                 perf_output_put(&handle, time);
2570
2571         if (sample_type & PERF_SAMPLE_ADDR)
2572                 perf_output_put(&handle, data->addr);
2573
2574         if (sample_type & PERF_SAMPLE_ID)
2575                 perf_output_put(&handle, counter->id);
2576
2577         if (sample_type & PERF_SAMPLE_CPU)
2578                 perf_output_put(&handle, cpu_entry);
2579
2580         if (sample_type & PERF_SAMPLE_PERIOD)
2581                 perf_output_put(&handle, data->period);
2582
2583         /*
2584          * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2585          */
2586         if (sample_type & PERF_SAMPLE_GROUP) {
2587                 struct perf_counter *leader, *sub;
2588                 u64 nr = counter->nr_siblings;
2589
2590                 perf_output_put(&handle, nr);
2591
2592                 leader = counter->group_leader;
2593                 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2594                         if (sub != counter)
2595                                 sub->pmu->read(sub);
2596
2597                         group_entry.id = sub->id;
2598                         group_entry.counter = atomic64_read(&sub->count);
2599
2600                         perf_output_put(&handle, group_entry);
2601                 }
2602         }
2603
2604         if (callchain)
2605                 perf_output_copy(&handle, callchain, callchain_size);
2606
2607         perf_output_end(&handle);
2608 }
2609
2610 /*
2611  * fork tracking
2612  */
2613
2614 struct perf_fork_event {
2615         struct task_struct      *task;
2616
2617         struct {
2618                 struct perf_event_header        header;
2619
2620                 u32                             pid;
2621                 u32                             ppid;
2622         } event;
2623 };
2624
2625 static void perf_counter_fork_output(struct perf_counter *counter,
2626                                      struct perf_fork_event *fork_event)
2627 {
2628         struct perf_output_handle handle;
2629         int size = fork_event->event.header.size;
2630         struct task_struct *task = fork_event->task;
2631         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2632
2633         if (ret)
2634                 return;
2635
2636         fork_event->event.pid = perf_counter_pid(counter, task);
2637         fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2638
2639         perf_output_put(&handle, fork_event->event);
2640         perf_output_end(&handle);
2641 }
2642
2643 static int perf_counter_fork_match(struct perf_counter *counter)
2644 {
2645         if (counter->attr.comm || counter->attr.mmap)
2646                 return 1;
2647
2648         return 0;
2649 }
2650
2651 static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2652                                   struct perf_fork_event *fork_event)
2653 {
2654         struct perf_counter *counter;
2655
2656         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2657                 return;
2658
2659         rcu_read_lock();
2660         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2661                 if (perf_counter_fork_match(counter))
2662                         perf_counter_fork_output(counter, fork_event);
2663         }
2664         rcu_read_unlock();
2665 }
2666
2667 static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2668 {
2669         struct perf_cpu_context *cpuctx;
2670         struct perf_counter_context *ctx;
2671
2672         cpuctx = &get_cpu_var(perf_cpu_context);
2673         perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
2674         put_cpu_var(perf_cpu_context);
2675
2676         rcu_read_lock();
2677         /*
2678          * it doesn't really matter which of the child contexts the
2679          * event ends up in.
2680          */
2681         ctx = rcu_dereference(current->perf_counter_ctxp);
2682         if (ctx)
2683                 perf_counter_fork_ctx(ctx, fork_event);
2684         rcu_read_unlock();
2685 }
2686
2687 void perf_counter_fork(struct task_struct *task)
2688 {
2689         struct perf_fork_event fork_event;
2690
2691         if (!atomic_read(&nr_comm_counters) &&
2692             !atomic_read(&nr_mmap_counters))
2693                 return;
2694
2695         fork_event = (struct perf_fork_event){
2696                 .task   = task,
2697                 .event  = {
2698                         .header = {
2699                                 .type = PERF_EVENT_FORK,
2700                                 .size = sizeof(fork_event.event),
2701                         },
2702                 },
2703         };
2704
2705         perf_counter_fork_event(&fork_event);
2706 }
2707
2708 /*
2709  * comm tracking
2710  */
2711
2712 struct perf_comm_event {
2713         struct task_struct      *task;
2714         char                    *comm;
2715         int                     comm_size;
2716
2717         struct {
2718                 struct perf_event_header        header;
2719
2720                 u32                             pid;
2721                 u32                             tid;
2722         } event;
2723 };
2724
2725 static void perf_counter_comm_output(struct perf_counter *counter,
2726                                      struct perf_comm_event *comm_event)
2727 {
2728         struct perf_output_handle handle;
2729         int size = comm_event->event.header.size;
2730         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2731
2732         if (ret)
2733                 return;
2734
2735         comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
2736         comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
2737
2738         perf_output_put(&handle, comm_event->event);
2739         perf_output_copy(&handle, comm_event->comm,
2740                                    comm_event->comm_size);
2741         perf_output_end(&handle);
2742 }
2743
2744 static int perf_counter_comm_match(struct perf_counter *counter)
2745 {
2746         if (counter->attr.comm)
2747                 return 1;
2748
2749         return 0;
2750 }
2751
2752 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2753                                   struct perf_comm_event *comm_event)
2754 {
2755         struct perf_counter *counter;
2756
2757         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2758                 return;
2759
2760         rcu_read_lock();
2761         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2762                 if (perf_counter_comm_match(counter))
2763                         perf_counter_comm_output(counter, comm_event);
2764         }
2765         rcu_read_unlock();
2766 }
2767
2768 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2769 {
2770         struct perf_cpu_context *cpuctx;
2771         struct perf_counter_context *ctx;
2772         unsigned int size;
2773         char *comm = comm_event->task->comm;
2774
2775         size = ALIGN(strlen(comm)+1, sizeof(u64));
2776
2777         comm_event->comm = comm;
2778         comm_event->comm_size = size;
2779
2780         comm_event->event.header.size = sizeof(comm_event->event) + size;
2781
2782         cpuctx = &get_cpu_var(perf_cpu_context);
2783         perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2784         put_cpu_var(perf_cpu_context);
2785
2786         rcu_read_lock();
2787         /*
2788          * it doesn't really matter which of the child contexts the
2789          * event ends up in.
2790          */
2791         ctx = rcu_dereference(current->perf_counter_ctxp);
2792         if (ctx)
2793                 perf_counter_comm_ctx(ctx, comm_event);
2794         rcu_read_unlock();
2795 }
2796
2797 void perf_counter_comm(struct task_struct *task)
2798 {
2799         struct perf_comm_event comm_event;
2800
2801         if (!atomic_read(&nr_comm_counters))
2802                 return;
2803
2804         comm_event = (struct perf_comm_event){
2805                 .task   = task,
2806                 .event  = {
2807                         .header = { .type = PERF_EVENT_COMM, },
2808                 },
2809         };
2810
2811         perf_counter_comm_event(&comm_event);
2812 }
2813
2814 /*
2815  * mmap tracking
2816  */
2817
2818 struct perf_mmap_event {
2819         struct vm_area_struct   *vma;
2820
2821         const char              *file_name;
2822         int                     file_size;
2823
2824         struct {
2825                 struct perf_event_header        header;
2826
2827                 u32                             pid;
2828                 u32                             tid;
2829                 u64                             start;
2830                 u64                             len;
2831                 u64                             pgoff;
2832         } event;
2833 };
2834
2835 static void perf_counter_mmap_output(struct perf_counter *counter,
2836                                      struct perf_mmap_event *mmap_event)
2837 {
2838         struct perf_output_handle handle;
2839         int size = mmap_event->event.header.size;
2840         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2841
2842         if (ret)
2843                 return;
2844
2845         mmap_event->event.pid = perf_counter_pid(counter, current);
2846         mmap_event->event.tid = perf_counter_tid(counter, current);
2847
2848         perf_output_put(&handle, mmap_event->event);
2849         perf_output_copy(&handle, mmap_event->file_name,
2850                                    mmap_event->file_size);
2851         perf_output_end(&handle);
2852 }
2853
2854 static int perf_counter_mmap_match(struct perf_counter *counter,
2855                                    struct perf_mmap_event *mmap_event)
2856 {
2857         if (counter->attr.mmap)
2858                 return 1;
2859
2860         return 0;
2861 }
2862
2863 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2864                                   struct perf_mmap_event *mmap_event)
2865 {
2866         struct perf_counter *counter;
2867
2868         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2869                 return;
2870
2871         rcu_read_lock();
2872         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2873                 if (perf_counter_mmap_match(counter, mmap_event))
2874                         perf_counter_mmap_output(counter, mmap_event);
2875         }
2876         rcu_read_unlock();
2877 }
2878
2879 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2880 {
2881         struct perf_cpu_context *cpuctx;
2882         struct perf_counter_context *ctx;
2883         struct vm_area_struct *vma = mmap_event->vma;
2884         struct file *file = vma->vm_file;
2885         unsigned int size;
2886         char tmp[16];
2887         char *buf = NULL;
2888         const char *name;
2889
2890         if (file) {
2891                 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2892                 if (!buf) {
2893                         name = strncpy(tmp, "//enomem", sizeof(tmp));
2894                         goto got_name;
2895                 }
2896                 name = d_path(&file->f_path, buf, PATH_MAX);
2897                 if (IS_ERR(name)) {
2898                         name = strncpy(tmp, "//toolong", sizeof(tmp));
2899                         goto got_name;
2900                 }
2901         } else {
2902                 name = arch_vma_name(mmap_event->vma);
2903                 if (name)
2904                         goto got_name;
2905
2906                 if (!vma->vm_mm) {
2907                         name = strncpy(tmp, "[vdso]", sizeof(tmp));
2908                         goto got_name;
2909                 }
2910
2911                 name = strncpy(tmp, "//anon", sizeof(tmp));
2912                 goto got_name;
2913         }
2914
2915 got_name:
2916         size = ALIGN(strlen(name)+1, sizeof(u64));
2917
2918         mmap_event->file_name = name;
2919         mmap_event->file_size = size;
2920
2921         mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2922
2923         cpuctx = &get_cpu_var(perf_cpu_context);
2924         perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2925         put_cpu_var(perf_cpu_context);
2926
2927         rcu_read_lock();
2928         /*
2929          * it doesn't really matter which of the child contexts the
2930          * event ends up in.
2931          */
2932         ctx = rcu_dereference(current->perf_counter_ctxp);
2933         if (ctx)
2934                 perf_counter_mmap_ctx(ctx, mmap_event);
2935         rcu_read_unlock();
2936
2937         kfree(buf);
2938 }
2939
2940 void __perf_counter_mmap(struct vm_area_struct *vma)
2941 {
2942         struct perf_mmap_event mmap_event;
2943
2944         if (!atomic_read(&nr_mmap_counters))
2945                 return;
2946
2947         mmap_event = (struct perf_mmap_event){
2948                 .vma    = vma,
2949                 .event  = {
2950                         .header = { .type = PERF_EVENT_MMAP, },
2951                         .start  = vma->vm_start,
2952                         .len    = vma->vm_end - vma->vm_start,
2953                         .pgoff  = vma->vm_pgoff,
2954                 },
2955         };
2956
2957         perf_counter_mmap_event(&mmap_event);
2958 }
2959
2960 /*
2961  * Log sample_period changes so that analyzing tools can re-normalize the
2962  * event flow.
2963  */
2964
2965 struct freq_event {
2966         struct perf_event_header        header;
2967         u64                             time;
2968         u64                             id;
2969         u64                             period;
2970 };
2971
2972 static void perf_log_period(struct perf_counter *counter, u64 period)
2973 {
2974         struct perf_output_handle handle;
2975         struct freq_event event;
2976         int ret;
2977
2978         if (counter->hw.sample_period == period)
2979                 return;
2980
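        /*
         * If every sample already carries the period (PERF_SAMPLE_PERIOD),
         * a separate PERF_EVENT_PERIOD record would be redundant.
         */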
2981         if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
2982                 return;
2983
2984         event = (struct freq_event) {
2985                 .header = {
2986                         .type = PERF_EVENT_PERIOD,
2987                         .misc = 0,
2988                         .size = sizeof(event),
2989                 },
2990                 .time = sched_clock(),
2991                 .id = counter->id,
2992                 .period = period,
2993         };
2994
2995         ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
2996         if (ret)
2997                 return;
2998
2999         perf_output_put(&handle, event);
3000         perf_output_end(&handle);
3001 }
3002
3003 /*
3004  * IRQ throttle logging
3005  */
3006
3007 static void perf_log_throttle(struct perf_counter *counter, int enable)
3008 {
3009         struct perf_output_handle handle;
3010         int ret;
3011
3012         struct {
3013                 struct perf_event_header        header;
3014                 u64                             time;
3015                 u64                             id;
3016         } throttle_event = {
3017                 .header = {
3018                         .type = PERF_EVENT_THROTTLE + enable,
3019                         .misc = 0,
3020                         .size = sizeof(throttle_event),
3021                 },
3022                 .time   = sched_clock(),
3023                 .id     = counter->id,
3024         };
3025
3026         ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
3027         if (ret)
3028                 return;
3029
3030         perf_output_put(&handle, throttle_event);
3031         perf_output_end(&handle);
3032 }
3033
3034 /*
3035  * Generic counter overflow handling, sampling.
3036  */
3037
3038 int perf_counter_overflow(struct perf_counter *counter, int nmi,
3039                           struct perf_sample_data *data)
3040 {
3041         int events = atomic_read(&counter->event_limit);
3042         int throttle = counter->pmu->unthrottle != NULL;
3043         struct hw_perf_counter *hwc = &counter->hw;
3044         int ret = 0;
3045
3046         if (!throttle) {
3047                 hwc->interrupts++;
3048         } else {
3049                 if (hwc->interrupts != MAX_INTERRUPTS) {
3050                         hwc->interrupts++;
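                        /*
                         * Throttle once this counter's interrupt count
                         * exceeds the per-tick share of the allowed
                         * sample rate.
                         */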
3051                         if (HZ * hwc->interrupts >
3052                                         (u64)sysctl_perf_counter_sample_rate) {
3053                                 hwc->interrupts = MAX_INTERRUPTS;
3054                                 perf_log_throttle(counter, 0);
3055                                 ret = 1;
3056                         }
3057                 } else {
3058                         /*
3059                          * Keep re-disabling the counter even though we disabled
3060                          * it on the previous pass - just in case we raced with a
3061                          * sched-in and the counter got enabled again:
3062                          */
3063                         ret = 1;
3064                 }
3065         }
3066
3067         if (counter->attr.freq) {
3068                 u64 now = sched_clock();
3069                 s64 delta = now - hwc->freq_stamp;
3070
3071                 hwc->freq_stamp = now;
3072
3073                 if (delta > 0 && delta < TICK_NSEC)
3074                         perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
3075         }
3076
3077         /*
3078          * XXX event_limit might not quite work as expected on inherited
3079          * counters
3080          */
3081
3082         counter->pending_kill = POLL_IN;
3083         if (events && atomic_dec_and_test(&counter->event_limit)) {
3084                 ret = 1;
3085                 counter->pending_kill = POLL_HUP;
3086                 if (nmi) {
3087                         counter->pending_disable = 1;
3088                         perf_pending_queue(&counter->pending,
3089                                            perf_pending_counter);
3090                 } else
3091                         perf_counter_disable(counter);
3092         }
3093
3094         perf_counter_output(counter, nmi, data);
3095         return ret;
3096 }
3097
3098 /*
3099  * Generic software counter infrastructure
3100  */
3101
3102 static void perf_swcounter_update(struct perf_counter *counter)
3103 {
3104         struct hw_perf_counter *hwc = &counter->hw;
3105         u64 prev, now;
3106         s64 delta;
3107
3108 again:
3109         prev = atomic64_read(&hwc->prev_count);
3110         now = atomic64_read(&hwc->count);
3111         if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
3112                 goto again;
3113
3114         delta = now - prev;
3115
3116         atomic64_add(delta, &counter->count);
3117         atomic64_sub(delta, &hwc->period_left);
3118 }
3119
3120 static void perf_swcounter_set_period(struct perf_counter *counter)
3121 {
3122         struct hw_perf_counter *hwc = &counter->hw;
3123         s64 left = atomic64_read(&hwc->period_left);
3124         s64 period = hwc->sample_period;
3125
3126         if (unlikely(left <= -period)) {
3127                 left = period;
3128                 atomic64_set(&hwc->period_left, left);
3129                 hwc->last_period = period;
3130         }
3131
3132         if (unlikely(left <= 0)) {
3133                 left += period;
3134                 atomic64_add(period, &hwc->period_left);
3135                 hwc->last_period = period;
3136         }
3137
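        /*
         * Prime the counter at -left so it turns non-negative - i.e.
         * overflows - after exactly 'left' more events.
         */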
3138         atomic64_set(&hwc->prev_count, -left);
3139         atomic64_set(&hwc->count, -left);
3140 }
3141
3142 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3143 {
3144         enum hrtimer_restart ret = HRTIMER_RESTART;
3145         struct perf_sample_data data;
3146         struct perf_counter *counter;
3147         u64 period;
3148
3149         counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3150         counter->pmu->read(counter);
3151
3152         data.addr = 0;
3153         data.regs = get_irq_regs();
3154         /*
3155          * In case we exclude kernel IPs or are somehow not in interrupt
3156          * context, provide the next best thing, the user IP.
3157          */
3158         if ((counter->attr.exclude_kernel || !data.regs) &&
3159                         !counter->attr.exclude_user)
3160                 data.regs = task_pt_regs(current);
3161
3162         if (data.regs) {
3163                 if (perf_counter_overflow(counter, 0, &data))
3164                         ret = HRTIMER_NORESTART;
3165         }
3166
3167         period = max_t(u64, 10000, counter->hw.sample_period);
3168         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3169
3170         return ret;
3171 }
3172
3173 static void perf_swcounter_overflow(struct perf_counter *counter,
3174                                     int nmi, struct pt_regs *regs, u64 addr)
3175 {
3176         struct perf_sample_data data = {
3177                 .regs   = regs,
3178                 .addr   = addr,
3179                 .period = counter->hw.last_period,
3180         };
3181
3182         perf_swcounter_update(counter);
3183         perf_swcounter_set_period(counter);
3184         if (perf_counter_overflow(counter, nmi, &data))
3185                 /* soft-disable the counter */
3186                 ;
3187
3188 }
3189
3190 static int perf_swcounter_is_counting(struct perf_counter *counter)
3191 {
3192         struct perf_counter_context *ctx;
3193         unsigned long flags;
3194         int count;
3195
3196         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3197                 return 1;
3198
3199         if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3200                 return 0;
3201
3202         /*
3203          * If the counter is inactive, it could be just because
3204          * its task is scheduled out, or because it's in a group
3205          * which could not go on the PMU.  We want to count in
3206          * the first case but not the second.  If the context is
3207          * currently active then an inactive software counter must
3208          * be the second case.  If it's not currently active then
3209          * we need to know whether the counter was active when the
3210          * context was last active, which we can determine by
3211          * comparing counter->tstamp_stopped with ctx->time.
3212          *
3213          * We are within an RCU read-side critical section,
3214          * which protects the existence of *ctx.
3215          */
3216         ctx = counter->ctx;
3217         spin_lock_irqsave(&ctx->lock, flags);
3218         count = 1;
3219         /* Re-check state now we have the lock */
3220         if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
3221             counter->ctx->is_active ||
3222             counter->tstamp_stopped < ctx->time)
3223                 count = 0;
3224         spin_unlock_irqrestore(&ctx->lock, flags);
3225         return count;
3226 }
3227
3228 static int perf_swcounter_match(struct perf_counter *counter,
3229                                 enum perf_type_id type,
3230                                 u32 event, struct pt_regs *regs)
3231 {
3232         if (!perf_swcounter_is_counting(counter))
3233                 return 0;
3234
3235         if (counter->attr.type != type)
3236                 return 0;
3237         if (counter->attr.config != event)
3238                 return 0;
3239
3240         if (regs) {
3241                 if (counter->attr.exclude_user && user_mode(regs))
3242                         return 0;
3243
3244                 if (counter->attr.exclude_kernel && !user_mode(regs))
3245                         return 0;
3246         }
3247
3248         return 1;
3249 }
3250
3251 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3252                                int nmi, struct pt_regs *regs, u64 addr)
3253 {
3254         int neg = atomic64_add_negative(nr, &counter->hw.count);
3255
3256         if (counter->hw.sample_period && !neg && regs)
3257                 perf_swcounter_overflow(counter, nmi, regs, addr);
3258 }
3259
3260 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3261                                      enum perf_type_id type, u32 event,
3262                                      u64 nr, int nmi, struct pt_regs *regs,
3263                                      u64 addr)
3264 {
3265         struct perf_counter *counter;
3266
3267         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3268                 return;
3269
3270         rcu_read_lock();
3271         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3272                 if (perf_swcounter_match(counter, type, event, regs))
3273                         perf_swcounter_add(counter, nr, nmi, regs, addr);
3274         }
3275         rcu_read_unlock();
3276 }
3277
3278 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3279 {
3280         if (in_nmi())
3281                 return &cpuctx->recursion[3];
3282
3283         if (in_irq())
3284                 return &cpuctx->recursion[2];
3285
3286         if (in_softirq())
3287                 return &cpuctx->recursion[1];
3288
3289         return &cpuctx->recursion[0];
3290 }
3291
3292 static void __perf_swcounter_event(enum perf_type_id type, u32 event,
3293                                    u64 nr, int nmi, struct pt_regs *regs,
3294                                    u64 addr)
3295 {
3296         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3297         int *recursion = perf_swcounter_recursion_context(cpuctx);
3298         struct perf_counter_context *ctx;
3299
3300         if (*recursion)
3301                 goto out;
3302
3303         (*recursion)++;
3304         barrier();
3305
3306         perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3307                                  nr, nmi, regs, addr);
3308         rcu_read_lock();
3309         /*
3310          * It doesn't really matter which of the child contexts
3311          * the event ends up in.
3312          */
3313         ctx = rcu_dereference(current->perf_counter_ctxp);
3314         if (ctx)
3315                 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
3316         rcu_read_unlock();
3317
3318         barrier();
3319         (*recursion)--;
3320
3321 out:
3322         put_cpu_var(perf_cpu_context);
3323 }
3324
3325 void
3326 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
3327 {
3328         __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
3329 }
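/*
 * Illustrative sketch, not part of this file: other kernel code reports
 * software events through the hook above.  A context switch could be
 * counted roughly like this (the real call sites live elsewhere, e.g.
 * in the scheduler):
 *
 *	perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 *
 * nr is the amount to add, nmi flags NMI context, and regs may be NULL
 * when no sample context is available - in that case the counter is
 * still incremented but no overflow sample is generated (see
 * perf_swcounter_add() above).
 */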
3330
3331 static void perf_swcounter_read(struct perf_counter *counter)
3332 {
3333         perf_swcounter_update(counter);
3334 }
3335
3336 static int perf_swcounter_enable(struct perf_counter *counter)
3337 {
3338         perf_swcounter_set_period(counter);
3339         return 0;
3340 }
3341
3342 static void perf_swcounter_disable(struct perf_counter *counter)
3343 {
3344         perf_swcounter_update(counter);
3345 }
3346
3347 static const struct pmu perf_ops_generic = {
3348         .enable         = perf_swcounter_enable,
3349         .disable        = perf_swcounter_disable,
3350         .read           = perf_swcounter_read,
3351 };
3352
3353 /*
3354  * Software counter: cpu wall time clock
3355  */
3356
3357 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3358 {
3359         int cpu = raw_smp_processor_id();
3360         s64 prev;
3361         u64 now;
3362
3363         now = cpu_clock(cpu);
3364         prev = atomic64_read(&counter->hw.prev_count);
3365         atomic64_set(&counter->hw.prev_count, now);
3366         atomic64_add(now - prev, &counter->count);
3367 }
3368
3369 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3370 {
3371         struct hw_perf_counter *hwc = &counter->hw;
3372         int cpu = raw_smp_processor_id();
3373
3374         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3375         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3376         hwc->hrtimer.function = perf_swcounter_hrtimer;
3377         if (hwc->sample_period) {
3378                 u64 period = max_t(u64, 10000, hwc->sample_period);
3379                 __hrtimer_start_range_ns(&hwc->hrtimer,
3380                                 ns_to_ktime(period), 0,
3381                                 HRTIMER_MODE_REL, 0);
3382         }
3383
3384         return 0;
3385 }
3386
3387 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3388 {
3389         if (counter->hw.sample_period)
3390                 hrtimer_cancel(&counter->hw.hrtimer);
3391         cpu_clock_perf_counter_update(counter);
3392 }
3393
3394 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3395 {
3396         cpu_clock_perf_counter_update(counter);
3397 }
3398
3399 static const struct pmu perf_ops_cpu_clock = {
3400         .enable         = cpu_clock_perf_counter_enable,
3401         .disable        = cpu_clock_perf_counter_disable,
3402         .read           = cpu_clock_perf_counter_read,
3403 };
3404
3405 /*
3406  * Software counter: task time clock
3407  */
3408
3409 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
3410 {
3411         u64 prev;
3412         s64 delta;
3413
3414         prev = atomic64_xchg(&counter->hw.prev_count, now);
3415         delta = now - prev;
3416         atomic64_add(delta, &counter->count);
3417 }
3418
3419 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3420 {
3421         struct hw_perf_counter *hwc = &counter->hw;
3422         u64 now;
3423
3424         now = counter->ctx->time;
3425
3426         atomic64_set(&hwc->prev_count, now);
3427         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3428         hwc->hrtimer.function = perf_swcounter_hrtimer;
3429         if (hwc->sample_period) {
3430                 u64 period = max_t(u64, 10000, hwc->sample_period);
3431                 __hrtimer_start_range_ns(&hwc->hrtimer,
3432                                 ns_to_ktime(period), 0,
3433                                 HRTIMER_MODE_REL, 0);
3434         }
3435
3436         return 0;
3437 }
3438
3439 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3440 {
3441         if (counter->hw.sample_period)
3442                 hrtimer_cancel(&counter->hw.hrtimer);
3443         task_clock_perf_counter_update(counter, counter->ctx->time);
3444
3445 }
3446
3447 static void task_clock_perf_counter_read(struct perf_counter *counter)
3448 {
3449         u64 time;
3450
3451         if (!in_nmi()) {
3452                 update_context_time(counter->ctx);
3453                 time = counter->ctx->time;
3454         } else {
3455                 u64 now = perf_clock();
3456                 u64 delta = now - counter->ctx->timestamp;
3457                 time = counter->ctx->time + delta;
3458         }
3459
3460         task_clock_perf_counter_update(counter, time);
3461 }
3462
3463 static const struct pmu perf_ops_task_clock = {
3464         .enable         = task_clock_perf_counter_enable,
3465         .disable        = task_clock_perf_counter_disable,
3466         .read           = task_clock_perf_counter_read,
3467 };
3468
3469 #ifdef CONFIG_EVENT_PROFILE
3470 void perf_tpcounter_event(int event_id)
3471 {
3472         struct pt_regs *regs = get_irq_regs();
3473
3474         if (!regs)
3475                 regs = task_pt_regs(current);
3476
3477         __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
3478 }
3479 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3480
3481 extern int ftrace_profile_enable(int);
3482 extern void ftrace_profile_disable(int);
3483
3484 static void tp_perf_counter_destroy(struct perf_counter *counter)
3485 {
3486         ftrace_profile_disable(perf_event_id(&counter->attr));
3487 }
3488
3489 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3490 {
3491         int event_id = perf_event_id(&counter->attr);
3492         int ret;
3493
3494         ret = ftrace_profile_enable(event_id);
3495         if (ret)
3496                 return NULL;
3497
3498         counter->destroy = tp_perf_counter_destroy;
3499
3500         return &perf_ops_generic;
3501 }
3502 #else
3503 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3504 {
3505         return NULL;
3506 }
3507 #endif
3508
3509 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3510 {
3511         const struct pmu *pmu = NULL;
3512
3513         /*
3514          * Software counters (currently) can't in general distinguish
3515          * between user, kernel and hypervisor events.
3516          * However, context switches and cpu migrations are considered
3517          * to be kernel events, and page faults are never hypervisor
3518          * events.
3519          */
3520         switch (counter->attr.config) {
3521         case PERF_COUNT_SW_CPU_CLOCK:
3522                 pmu = &perf_ops_cpu_clock;
3523
3524                 break;
3525         case PERF_COUNT_SW_TASK_CLOCK:
3526                 /*
3527                  * If the user instantiates this as a per-cpu counter,
3528                  * use the cpu_clock counter instead.
3529                  */
3530                 if (counter->ctx->task)
3531                         pmu = &perf_ops_task_clock;
3532                 else
3533                         pmu = &perf_ops_cpu_clock;
3534
3535                 break;
3536         case PERF_COUNT_SW_PAGE_FAULTS:
3537         case PERF_COUNT_SW_PAGE_FAULTS_MIN:
3538         case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
3539         case PERF_COUNT_SW_CONTEXT_SWITCHES:
3540         case PERF_COUNT_SW_CPU_MIGRATIONS:
3541                 pmu = &perf_ops_generic;
3542                 break;
3543         }
3544
3545         return pmu;
3546 }
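/*
 * Illustrative note, not part of this file: which software pmu the
 * switch above selects depends on how the counter was opened.  For
 * example (attribute values only, opened via sys_perf_counter_open):
 *
 *	attr.type   = PERF_TYPE_SOFTWARE;
 *	attr.config = PERF_COUNT_SW_TASK_CLOCK;
 *
 *	pid == 0,  cpu == -1  ->  perf_ops_task_clock (per-task context)
 *	pid == -1, cpu == n   ->  perf_ops_cpu_clock  (per-cpu context,
 *						       ctx->task is NULL)
 */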
3547
3548 /*
3549  * Allocate and initialize a counter structure
3550  */
3551 static struct perf_counter *
3552 perf_counter_alloc(struct perf_counter_attr *attr,
3553                    int cpu,
3554                    struct perf_counter_context *ctx,
3555                    struct perf_counter *group_leader,
3556                    gfp_t gfpflags)
3557 {
3558         const struct pmu *pmu;
3559         struct perf_counter *counter;
3560         struct hw_perf_counter *hwc;
3561         long err;
3562
3563         counter = kzalloc(sizeof(*counter), gfpflags);
3564         if (!counter)
3565                 return ERR_PTR(-ENOMEM);
3566
3567         /*
3568          * Single counters are their own group leaders, with an
3569          * empty sibling list:
3570          */
3571         if (!group_leader)
3572                 group_leader = counter;
3573
3574         mutex_init(&counter->child_mutex);
3575         INIT_LIST_HEAD(&counter->child_list);
3576
3577         INIT_LIST_HEAD(&counter->list_entry);
3578         INIT_LIST_HEAD(&counter->event_entry);
3579         INIT_LIST_HEAD(&counter->sibling_list);
3580         init_waitqueue_head(&counter->waitq);
3581
3582         mutex_init(&counter->mmap_mutex);
3583
3584         counter->cpu            = cpu;
3585         counter->attr           = *attr;
3586         counter->group_leader   = group_leader;
3587         counter->pmu            = NULL;
3588         counter->ctx            = ctx;
3589         counter->oncpu          = -1;
3590
3591         counter->ns             = get_pid_ns(current->nsproxy->pid_ns);
3592         counter->id             = atomic64_inc_return(&perf_counter_id);
3593
3594         counter->state          = PERF_COUNTER_STATE_INACTIVE;
3595
3596         if (attr->disabled)
3597                 counter->state = PERF_COUNTER_STATE_OFF;
3598
3599         pmu = NULL;
3600
3601         hwc = &counter->hw;
3602         hwc->sample_period = attr->sample_period;
3603         if (attr->freq && attr->sample_freq)
3604                 hwc->sample_period = 1;
3605
3606         atomic64_set(&hwc->period_left, hwc->sample_period);
3607
3608         /*
3609          * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3610          */
3611         if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3612                 goto done;
3613
3614         switch (attr->type) {
3615         case PERF_TYPE_RAW:
3616         case PERF_TYPE_HARDWARE:
3617         case PERF_TYPE_HW_CACHE:
3618                 pmu = hw_perf_counter_init(counter);
3619                 break;
3620
3621         case PERF_TYPE_SOFTWARE:
3622                 pmu = sw_perf_counter_init(counter);
3623                 break;
3624
3625         case PERF_TYPE_TRACEPOINT:
3626                 pmu = tp_perf_counter_init(counter);
3627                 break;
3628
3629         default:
3630                 break;
3631         }
3632 done:
3633         err = 0;
3634         if (!pmu)
3635                 err = -EINVAL;
3636         else if (IS_ERR(pmu))
3637                 err = PTR_ERR(pmu);
3638
3639         if (err) {
3640                 if (counter->ns)
3641                         put_pid_ns(counter->ns);
3642                 kfree(counter);
3643                 return ERR_PTR(err);
3644         }
3645
3646         counter->pmu = pmu;
3647
3648         atomic_inc(&nr_counters);
3649         if (counter->attr.mmap)
3650                 atomic_inc(&nr_mmap_counters);
3651         if (counter->attr.comm)
3652                 atomic_inc(&nr_comm_counters);
3653
3654         return counter;
3655 }
3656
3657 static int perf_copy_attr(struct perf_counter_attr __user *uattr,
3658                           struct perf_counter_attr *attr)
3659 {
3660         int ret;
3661         u32 size;
3662
3663         if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
3664                 return -EFAULT;
3665
3666         /*
3667          * Zero the full structure, so that a short copy leaves the rest zeroed.
3668          */
3669         memset(attr, 0, sizeof(*attr));
3670
3671         ret = get_user(size, &uattr->size);
3672         if (ret)
3673                 return ret;
3674
3675         if (size > PAGE_SIZE)   /* silly large */
3676                 goto err_size;
3677
3678         if (!size)              /* abi compat */
3679                 size = PERF_ATTR_SIZE_VER0;
3680
3681         if (size < PERF_ATTR_SIZE_VER0)
3682                 goto err_size;
3683
3684         /*
3685          * If we're handed a bigger struct than we know of,
3686          * ensure all the unknown bits are 0.
3687          */
3688         if (size > sizeof(*attr)) {
3689                 unsigned long val;
3690                 unsigned long __user *addr;
3691                 unsigned long __user *end;
3692
3693                 addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
3694                                 sizeof(unsigned long));
3695                 end  = PTR_ALIGN((void __user *)uattr + size,
3696                                 sizeof(unsigned long));
3697
3698                 for (; addr < end; addr += sizeof(unsigned long)) {
3699                         ret = get_user(val, addr);
3700                         if (ret)
3701                                 return ret;
3702                         if (val)
3703                                 goto err_size;
3704                 }
3705         }
3706
3707         ret = copy_from_user(attr, uattr, size);
3708         if (ret)
3709                 return -EFAULT;
3710
3711         /*
3712          * If the type is valid, the type-specific initialization will
3713          * verify attr->config.
3714          */
3715         if (attr->type >= PERF_TYPE_MAX)
3716                 return -EINVAL;
3717
3718         if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
3719                 return -EINVAL;
3720
3721         if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
3722                 return -EINVAL;
3723
3724         if (attr->read_format & ~(PERF_FORMAT_MAX-1))
3725                 return -EINVAL;
3726
3727 out:
3728         return ret;
3729
3730 err_size:
3731         put_user(sizeof(*attr), &uattr->size);
3732         ret = -E2BIG;
3733         goto out;
3734 }
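/*
 * Illustrative sketch, not part of this file: the size-based ABI
 * handshake in perf_copy_attr() as seen from userspace.  A binary
 * always passes the size of the structure it was compiled against:
 *
 *	struct perf_counter_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_SOFTWARE;
 *	attr.config = PERF_COUNT_SW_CPU_CLOCK;
 *
 * A size of 0 is treated as PERF_ATTR_SIZE_VER0 for ABI compatibility.
 * If the structure is larger than this kernel knows about and any of
 * the extra bytes are non-zero, the call fails with -E2BIG and the
 * kernel writes its own sizeof(*attr) back into uattr->size so the
 * caller can retry with the smaller layout.
 */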
3735
3736 /**
3737  * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
3738  *
3739  * @attr_uptr:  event type attributes for monitoring/sampling
3740  * @pid:                target pid
3741  * @cpu:                target cpu
3742  * @group_fd:           group leader counter fd
3743  */
3744 SYSCALL_DEFINE5(perf_counter_open,
3745                 struct perf_counter_attr __user *, attr_uptr,
3746                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3747 {
3748         struct perf_counter *counter, *group_leader;
3749         struct perf_counter_attr attr;
3750         struct perf_counter_context *ctx;
3751         struct file *counter_file = NULL;
3752         struct file *group_file = NULL;
3753         int fput_needed = 0;
3754         int fput_needed2 = 0;
3755         int ret;
3756
3757         /* for future expandability... */
3758         if (flags)
3759                 return -EINVAL;
3760
3761         ret = perf_copy_attr(attr_uptr, &attr);
3762         if (ret)
3763                 return ret;
3764
3765         if (!attr.exclude_kernel) {
3766                 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
3767                         return -EACCES;
3768         }
3769
3770         if (attr.freq) {
3771                 if (attr.sample_freq > sysctl_perf_counter_sample_rate)
3772                         return -EINVAL;
3773         }
3774
3775         /*
3776          * Get the target context (task or percpu):
3777          */
3778         ctx = find_get_context(pid, cpu);
3779         if (IS_ERR(ctx))
3780                 return PTR_ERR(ctx);
3781
3782         /*
3783          * Look up the group leader (we will attach this counter to it):
3784          */
3785         group_leader = NULL;
3786         if (group_fd != -1) {
3787                 ret = -EINVAL;
3788                 group_file = fget_light(group_fd, &fput_needed);
3789                 if (!group_file)
3790                         goto err_put_context;
3791                 if (group_file->f_op != &perf_fops)
3792                         goto err_put_context;
3793
3794                 group_leader = group_file->private_data;
3795                 /*
3796                  * Do not allow a recursive hierarchy (the proposed group
3797                  * leader must not itself be a sibling in another group):
3798                  */
3799                 if (group_leader->group_leader != group_leader)
3800                         goto err_put_context;
3801                 /*
3802                  * Do not allow attaching to a group in a different
3803                  * task or CPU context:
3804                  */
3805                 if (group_leader->ctx != ctx)
3806                         goto err_put_context;
3807                 /*
3808                  * Only a group leader can be exclusive or pinned
3809                  */
3810                 if (attr.exclusive || attr.pinned)
3811                         goto err_put_context;
3812         }
3813
3814         counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
3815                                      GFP_KERNEL);
3816         ret = PTR_ERR(counter);
3817         if (IS_ERR(counter))
3818                 goto err_put_context;
3819
3820         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3821         if (ret < 0)
3822                 goto err_free_put_context;
3823
3824         counter_file = fget_light(ret, &fput_needed2);
3825         if (!counter_file)
3826                 goto err_free_put_context;
3827
3828         counter->filp = counter_file;
3829         WARN_ON_ONCE(ctx->parent_ctx);
3830         mutex_lock(&ctx->mutex);
3831         perf_install_in_context(ctx, counter, cpu);
3832         ++ctx->generation;
3833         mutex_unlock(&ctx->mutex);
3834
3835         counter->owner = current;
3836         get_task_struct(current);
3837         mutex_lock(&current->perf_counter_mutex);
3838         list_add_tail(&counter->owner_entry, &current->perf_counter_list);
3839         mutex_unlock(&current->perf_counter_mutex);
3840
3841         fput_light(counter_file, fput_needed2);
3842
3843 out_fput:
3844         fput_light(group_file, fput_needed);
3845
3846         return ret;
3847
3848 err_free_put_context:
3849         kfree(counter);
3850
3851 err_put_context:
3852         put_ctx(ctx);
3853
3854         goto out_fput;
3855 }
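/*
 * Illustrative userspace sketch, not part of this file, assuming the
 * architecture defines __NR_perf_counter_open: count task-clock time
 * for the calling thread and read the value back through the returned
 * file descriptor.
 *
 *	struct perf_counter_attr attr;
 *	unsigned long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_SOFTWARE;
 *	attr.config = PERF_COUNT_SW_TASK_CLOCK;
 *
 *	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *	... do the work to be measured ...
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 *
 * pid == 0 selects the current task, cpu == -1 means "any cpu",
 * group_fd == -1 makes this counter its own group leader, and flags
 * must currently be 0.
 */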
3856
3857 /*
3858  * inherit a counter from parent task to child task:
3859  */
3860 static struct perf_counter *
3861 inherit_counter(struct perf_counter *parent_counter,
3862               struct task_struct *parent,
3863               struct perf_counter_context *parent_ctx,
3864               struct task_struct *child,
3865               struct perf_counter *group_leader,
3866               struct perf_counter_context *child_ctx)
3867 {
3868         struct perf_counter *child_counter;
3869
3870         /*
3871          * Instead of creating recursive hierarchies of counters,
3872          * we link inherited counters back to the original parent,
3873          * which is guaranteed to have a filp that we use as the
3874          * reference count:
3875          */
3876         if (parent_counter->parent)
3877                 parent_counter = parent_counter->parent;
3878
3879         child_counter = perf_counter_alloc(&parent_counter->attr,
3880                                            parent_counter->cpu, child_ctx,
3881                                            group_leader, GFP_KERNEL);
3882         if (IS_ERR(child_counter))
3883                 return child_counter;
3884         get_ctx(child_ctx);
3885
3886         /*
3887          * Make the child state follow the state of the parent counter,
3888          * not its attr.disabled bit.  We hold the parent's mutex,
3889          * so we won't race with perf_counter_{en, dis}able_family.
3890          */
3891         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3892                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3893         else
3894                 child_counter->state = PERF_COUNTER_STATE_OFF;
3895
3896         if (parent_counter->attr.freq)
3897                 child_counter->hw.sample_period = parent_counter->hw.sample_period;
3898
3899         /*
3900          * Link it up in the child's context:
3901          */
3902         add_counter_to_ctx(child_counter, child_ctx);
3903
3904         child_counter->parent = parent_counter;
3905         /*
3906          * inherit into child's child as well:
3907          */
3908         child_counter->attr.inherit = 1;
3909
3910         /*
3911          * Get a reference to the parent filp - we will fput it
3912          * when the child counter exits. This is safe to do because
3913          * we are in the parent and we know that the filp still
3914          * exists and has a nonzero count:
3915          */
3916         atomic_long_inc(&parent_counter->filp->f_count);
3917
3918         /*
3919          * Link this into the parent counter's child list
3920          */
3921         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3922         mutex_lock(&parent_counter->child_mutex);
3923         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3924         mutex_unlock(&parent_counter->child_mutex);
3925
3926         return child_counter;
3927 }
3928
3929 static int inherit_group(struct perf_counter *parent_counter,
3930               struct task_struct *parent,
3931               struct perf_counter_context *parent_ctx,
3932               struct task_struct *child,
3933               struct perf_counter_context *child_ctx)
3934 {
3935         struct perf_counter *leader;
3936         struct perf_counter *sub;
3937         struct perf_counter *child_ctr;
3938
3939         leader = inherit_counter(parent_counter, parent, parent_ctx,
3940                                  child, NULL, child_ctx);
3941         if (IS_ERR(leader))
3942                 return PTR_ERR(leader);
3943         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3944                 child_ctr = inherit_counter(sub, parent, parent_ctx,
3945                                             child, leader, child_ctx);
3946                 if (IS_ERR(child_ctr))
3947                         return PTR_ERR(child_ctr);
3948         }
3949         return 0;
3950 }
3951
3952 static void sync_child_counter(struct perf_counter *child_counter,
3953                                struct perf_counter *parent_counter)
3954 {
3955         u64 child_val;
3956
3957         child_val = atomic64_read(&child_counter->count);
3958
3959         /*
3960          * Add back the child's count to the parent's count:
3961          */
3962         atomic64_add(child_val, &parent_counter->count);
3963         atomic64_add(child_counter->total_time_enabled,
3964                      &parent_counter->child_total_time_enabled);
3965         atomic64_add(child_counter->total_time_running,
3966                      &parent_counter->child_total_time_running);
3967
3968         /*
3969          * Remove this counter from the parent's list
3970          */
3971         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3972         mutex_lock(&parent_counter->child_mutex);
3973         list_del_init(&child_counter->child_list);
3974         mutex_unlock(&parent_counter->child_mutex);
3975
3976         /*
3977          * Release the parent counter, if this was the last
3978          * reference to it.
3979          */
3980         fput(parent_counter->filp);
3981 }
3982
3983 static void
3984 __perf_counter_exit_task(struct perf_counter *child_counter,
3985                          struct perf_counter_context *child_ctx)
3986 {
3987         struct perf_counter *parent_counter;
3988
3989         update_counter_times(child_counter);
3990         perf_counter_remove_from_context(child_counter);
3991
3992         parent_counter = child_counter->parent;
3993         /*
3994          * It can happen that the parent exits first, and its counters
3995          * are still around only because of the child's reference. Those
3996          * counters need to be zapped here - otherwise they would linger.
3997          */
3998         if (parent_counter) {
3999                 sync_child_counter(child_counter, parent_counter);
4000                 free_counter(child_counter);
4001         }
4002 }
4003
4004 /*
4005  * When a child task exits, feed back counter values to parent counters.
4006  */
4007 void perf_counter_exit_task(struct task_struct *child)
4008 {
4009         struct perf_counter *child_counter, *tmp;
4010         struct perf_counter_context *child_ctx;
4011         unsigned long flags;
4012
4013         if (likely(!child->perf_counter_ctxp))
4014                 return;
4015
4016         local_irq_save(flags);
4017         /*
4018          * We can't reschedule here because interrupts are disabled,
4019          * and the child is either current or a task that can't be
4020          * scheduled, so we are safe from a reschedule changing our
4021          * context under us.
4022          */
4023         child_ctx = child->perf_counter_ctxp;
4024         __perf_counter_task_sched_out(child_ctx);
4025
4026         /*
4027          * Take the context lock here so that if find_get_context is
4028          * reading child->perf_counter_ctxp, we wait until it has
4029          * incremented the context's refcount before we do put_ctx below.
4030          */
4031         spin_lock(&child_ctx->lock);
4032         child->perf_counter_ctxp = NULL;
4033         if (child_ctx->parent_ctx) {
4034                 /*
4035                  * This context is a clone; unclone it so it can't get
4036                  * swapped to another process while we're removing all
4037                  * the counters from it.
4038                  */
4039                 put_ctx(child_ctx->parent_ctx);
4040                 child_ctx->parent_ctx = NULL;
4041         }
4042         spin_unlock(&child_ctx->lock);
4043         local_irq_restore(flags);
4044
4045         /*
4046          * We can recurse on the same lock type through:
4047          *
4048          *   __perf_counter_exit_task()
4049          *     sync_child_counter()
4050          *       fput(parent_counter->filp)
4051          *         perf_release()
4052          *           mutex_lock(&ctx->mutex)
4053          *
4054          * But since it is the parent context it won't be the same instance.
4055          */
4056         mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
4057
4058 again:
4059         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
4060                                  list_entry)
4061                 __perf_counter_exit_task(child_counter, child_ctx);
4062
4063         /*
4064          * If the last counter was a group counter, it will have appended all
4065          * its siblings to the list, but we obtained 'tmp' before that, so
4066          * it still points to the list head, terminating the iteration.
4067          */
4068         if (!list_empty(&child_ctx->counter_list))
4069                 goto again;
4070
4071         mutex_unlock(&child_ctx->mutex);
4072
4073         put_ctx(child_ctx);
4074 }
4075
4076 /*
4077  * Free an unexposed, unused context created by inheritance in
4078  * perf_counter_init_task() below, used by fork() in case of failure.
4079  */
4080 void perf_counter_free_task(struct task_struct *task)
4081 {
4082         struct perf_counter_context *ctx = task->perf_counter_ctxp;
4083         struct perf_counter *counter, *tmp;
4084
4085         if (!ctx)
4086                 return;
4087
4088         mutex_lock(&ctx->mutex);
4089 again:
4090         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
4091                 struct perf_counter *parent = counter->parent;
4092
4093                 if (WARN_ON_ONCE(!parent))
4094                         continue;
4095
4096                 mutex_lock(&parent->child_mutex);
4097                 list_del_init(&counter->child_list);
4098                 mutex_unlock(&parent->child_mutex);
4099
4100                 fput(parent->filp);
4101
4102                 list_del_counter(counter, ctx);
4103                 free_counter(counter);
4104         }
4105
4106         if (!list_empty(&ctx->counter_list))
4107                 goto again;
4108
4109         mutex_unlock(&ctx->mutex);
4110
4111         put_ctx(ctx);
4112 }
4113
4114 /*
4115  * Initialize the perf_counter context in task_struct
4116  */
4117 int perf_counter_init_task(struct task_struct *child)
4118 {
4119         struct perf_counter_context *child_ctx, *parent_ctx;
4120         struct perf_counter_context *cloned_ctx;
4121         struct perf_counter *counter;
4122         struct task_struct *parent = current;
4123         int inherited_all = 1;
4124         int ret = 0;
4125
4126         child->perf_counter_ctxp = NULL;
4127
4128         mutex_init(&child->perf_counter_mutex);
4129         INIT_LIST_HEAD(&child->perf_counter_list);
4130
4131         if (likely(!parent->perf_counter_ctxp))
4132                 return 0;
4133
4134         /*
4135          * This is executed from the parent task context, so inherit
4136          * counters that have been marked for cloning.
4137          * First allocate and initialize a context for the child.
4138          */
4139
4140         child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
4141         if (!child_ctx)
4142                 return -ENOMEM;
4143
4144         __perf_counter_init_context(child_ctx, child);
4145         child->perf_counter_ctxp = child_ctx;
4146         get_task_struct(child);
4147
4148         /*
4149          * If the parent's context is a clone, pin it so it won't get
4150          * swapped under us.
4151          */
4152         parent_ctx = perf_pin_task_context(parent);
4153
4154         /*
4155          * No need to check if parent_ctx != NULL here; since we saw
4156          * it non-NULL earlier, the only reason for it to become NULL
4157          * is if we exit, and since we're currently in the middle of
4158          * a fork we can't be exiting at the same time.
4159          */
4160
4161         /*
4162          * Lock the parent list. No need to lock the child - not PID
4163          * hashed yet and not running, so nobody can access it.
4164          */
4165         mutex_lock(&parent_ctx->mutex);
4166
4167         /*
4168          * We don't have to disable NMIs - we are only looking at
4169          * the list, not manipulating it:
4170          */
4171         list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
4172                 if (counter != counter->group_leader)
4173                         continue;
4174
4175                 if (!counter->attr.inherit) {
4176                         inherited_all = 0;
4177                         continue;
4178                 }
4179
4180                 ret = inherit_group(counter, parent, parent_ctx,
4181                                              child, child_ctx);
4182                 if (ret) {
4183                         inherited_all = 0;
4184                         break;
4185                 }
4186         }
4187
4188         if (inherited_all) {
4189                 /*
4190                  * Mark the child context as a clone of the parent
4191                  * context, or of whatever the parent is a clone of.
4192                  * Note that if the parent is a clone, it could get
4193                  * uncloned at any point, but that doesn't matter
4194                  * because the list of counters and the generation
4195                  * count can't have changed since we took the mutex.
4196                  */
4197                 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
4198                 if (cloned_ctx) {
4199                         child_ctx->parent_ctx = cloned_ctx;
4200                         child_ctx->parent_gen = parent_ctx->parent_gen;
4201                 } else {
4202                         child_ctx->parent_ctx = parent_ctx;
4203                         child_ctx->parent_gen = parent_ctx->generation;
4204                 }
4205                 get_ctx(child_ctx->parent_ctx);
4206         }
4207
4208         mutex_unlock(&parent_ctx->mutex);
4209
4210         perf_unpin_context(parent_ctx);
4211
4212         return ret;
4213 }
4214
4215 static void __cpuinit perf_counter_init_cpu(int cpu)
4216 {
4217         struct perf_cpu_context *cpuctx;
4218
4219         cpuctx = &per_cpu(perf_cpu_context, cpu);
4220         __perf_counter_init_context(&cpuctx->ctx, NULL);
4221
4222         spin_lock(&perf_resource_lock);
4223         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
4224         spin_unlock(&perf_resource_lock);
4225
4226         hw_perf_counter_setup(cpu);
4227 }
4228
4229 #ifdef CONFIG_HOTPLUG_CPU
4230 static void __perf_counter_exit_cpu(void *info)
4231 {
4232         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4233         struct perf_counter_context *ctx = &cpuctx->ctx;
4234         struct perf_counter *counter, *tmp;
4235
4236         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4237                 __perf_counter_remove_from_context(counter);
4238 }
4239 static void perf_counter_exit_cpu(int cpu)
4240 {
4241         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4242         struct perf_counter_context *ctx = &cpuctx->ctx;
4243
4244         mutex_lock(&ctx->mutex);
4245         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
4246         mutex_unlock(&ctx->mutex);
4247 }
4248 #else
4249 static inline void perf_counter_exit_cpu(int cpu) { }
4250 #endif
4251
4252 static int __cpuinit
4253 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4254 {
4255         unsigned int cpu = (long)hcpu;
4256
4257         switch (action) {
4258
4259         case CPU_UP_PREPARE:
4260         case CPU_UP_PREPARE_FROZEN:
4261                 perf_counter_init_cpu(cpu);
4262                 break;
4263
4264         case CPU_DOWN_PREPARE:
4265         case CPU_DOWN_PREPARE_FROZEN:
4266                 perf_counter_exit_cpu(cpu);
4267                 break;
4268
4269         default:
4270                 break;
4271         }
4272
4273         return NOTIFY_OK;
4274 }
4275
4276 /*
4277  * This has to have a higher priority than migration_notifier in sched.c.
4278  */
4279 static struct notifier_block __cpuinitdata perf_cpu_nb = {
4280         .notifier_call          = perf_cpu_notify,
4281         .priority               = 20,
4282 };
4283
4284 void __init perf_counter_init(void)
4285 {
4286         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4287                         (void *)(long)smp_processor_id());
4288         register_cpu_notifier(&perf_cpu_nb);
4289 }
4290
4291 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4292 {
4293         return sprintf(buf, "%d\n", perf_reserved_percpu);
4294 }
4295
4296 static ssize_t
4297 perf_set_reserve_percpu(struct sysdev_class *class,
4298                         const char *buf,
4299                         size_t count)
4300 {
4301         struct perf_cpu_context *cpuctx;
4302         unsigned long val;
4303         int err, cpu, mpt;
4304
4305         err = strict_strtoul(buf, 10, &val);
4306         if (err)
4307                 return err;
4308         if (val > perf_max_counters)
4309                 return -EINVAL;
4310
4311         spin_lock(&perf_resource_lock);
4312         perf_reserved_percpu = val;
4313         for_each_online_cpu(cpu) {
4314                 cpuctx = &per_cpu(perf_cpu_context, cpu);
4315                 spin_lock_irq(&cpuctx->ctx.lock);
4316                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4317                           perf_max_counters - perf_reserved_percpu);
4318                 cpuctx->max_pertask = mpt;
4319                 spin_unlock_irq(&cpuctx->ctx.lock);
4320         }
4321         spin_unlock(&perf_resource_lock);
4322
4323         return count;
4324 }
4325
4326 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4327 {
4328         return sprintf(buf, "%d\n", perf_overcommit);
4329 }
4330
4331 static ssize_t
4332 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4333 {
4334         unsigned long val;
4335         int err;
4336
4337         err = strict_strtoul(buf, 10, &val);
4338         if (err)
4339                 return err;
4340         if (val > 1)
4341                 return -EINVAL;
4342
4343         spin_lock(&perf_resource_lock);
4344         perf_overcommit = val;
4345         spin_unlock(&perf_resource_lock);
4346
4347         return count;
4348 }
4349
4350 static SYSDEV_CLASS_ATTR(
4351                                 reserve_percpu,
4352                                 0644,
4353                                 perf_show_reserve_percpu,
4354                                 perf_set_reserve_percpu
4355                         );
4356
4357 static SYSDEV_CLASS_ATTR(
4358                                 overcommit,
4359                                 0644,
4360                                 perf_show_overcommit,
4361                                 perf_set_overcommit
4362                         );
4363
4364 static struct attribute *perfclass_attrs[] = {
4365         &attr_reserve_percpu.attr,
4366         &attr_overcommit.attr,
4367         NULL
4368 };
4369
4370 static struct attribute_group perfclass_attr_group = {
4371         .attrs                  = perfclass_attrs,
4372         .name                   = "perf_counters",
4373 };
4374
4375 static int __init perf_counter_sysfs_init(void)
4376 {
4377         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4378                                   &perfclass_attr_group);
4379 }
4380 device_initcall(perf_counter_sysfs_init);
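/*
 * Illustrative userspace sketch, not part of this file: the two knobs
 * above are plain sysfs attributes in the cpu sysdev class, typically
 * visible under /sys/devices/system/cpu/perf_counters/.  Reading one
 * back could look like this:
 *
 *	FILE *f = fopen("/sys/devices/system/cpu/perf_counters/reserve_percpu", "r");
 *	int reserved = 0;
 *
 *	if (f) {
 *		fscanf(f, "%d", &reserved);
 *		fclose(f);
 *	}
 *
 * reserve_percpu must not exceed perf_max_counters, and overcommit
 * only accepts 0 or 1, as enforced by the store handlers above.
 */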