kernel/perf_counter.c (mv-sheeva.git)
perf_counter: provide major/minor page fault software events
1 /*
2  * Performance counter core code
3  *
4  *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
6  *
7  *  For licencing details see kernel-base/COPYING
8  */
9
10 #include <linux/fs.h>
11 #include <linux/cpu.h>
12 #include <linux/smp.h>
13 #include <linux/file.h>
14 #include <linux/poll.h>
15 #include <linux/sysfs.h>
16 #include <linux/ptrace.h>
17 #include <linux/percpu.h>
18 #include <linux/uaccess.h>
19 #include <linux/syscalls.h>
20 #include <linux/anon_inodes.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/perf_counter.h>
23 #include <linux/mm.h>
24 #include <linux/vmstat.h>
25
26 /*
27  * Each CPU has a list of per-CPU counters:
28  */
29 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
30
31 int perf_max_counters __read_mostly = 1;
32 static int perf_reserved_percpu __read_mostly;
33 static int perf_overcommit __read_mostly = 1;
34
35 /*
36  * Mutex for (sysadmin-configurable) counter reservations:
37  */
38 static DEFINE_MUTEX(perf_resource_mutex);
39
40 /*
41  * Architecture-provided APIs - weak aliases:
42  */
43 extern __weak const struct hw_perf_counter_ops *
44 hw_perf_counter_init(struct perf_counter *counter)
45 {
46         return NULL;
47 }
48
49 u64 __weak hw_perf_save_disable(void)           { return 0; }
50 void __weak hw_perf_restore(u64 ctrl)           { barrier(); }
51 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
52 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
53                struct perf_cpu_context *cpuctx,
54                struct perf_counter_context *ctx, int cpu)
55 {
56         return 0;
57 }
58
59 void __weak perf_counter_print_debug(void)      { }
60
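/*
 * Illustrative sketch, not part of this file: an architecture with a PMU
 * is expected to override the weak stubs above with strong definitions.
 * All arch_pmu_* names below are hypothetical:
 *
 *	static const struct hw_perf_counter_ops arch_pmu_ops = {
 *		.enable		= arch_pmu_enable,
 *		.disable	= arch_pmu_disable,
 *		.read		= arch_pmu_read,
 *	};
 *
 *	const struct hw_perf_counter_ops *
 *	hw_perf_counter_init(struct perf_counter *counter)
 *	{
 *		if (!arch_pmu_present())
 *			return NULL;
 *		return &arch_pmu_ops;
 *	}
 *
 * hw_perf_save_disable()/hw_perf_restore() would likewise be overridden to
 * globally stop and restart the PMU around counter-list manipulation.
 */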
61 static void
62 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
63 {
64         struct perf_counter *group_leader = counter->group_leader;
65
66         /*
67          * Depending on whether it is a standalone or sibling counter,
68          * add it straight to the context's counter list, or to the group
69          * leader's sibling list:
70          */
71         if (counter->group_leader == counter)
72                 list_add_tail(&counter->list_entry, &ctx->counter_list);
73         else
74                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
75 }
76
77 static void
78 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
79 {
80         struct perf_counter *sibling, *tmp;
81
82         list_del_init(&counter->list_entry);
83
84         /*
85          * If this was a group counter with sibling counters then
86          * upgrade the siblings to singleton counters by adding them
87          * to the context list directly:
88          */
89         list_for_each_entry_safe(sibling, tmp,
90                                  &counter->sibling_list, list_entry) {
91
92                 list_move_tail(&sibling->list_entry, &ctx->counter_list);
93                 sibling->group_leader = sibling;
94         }
95 }
96
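/*
 * Illustrative example of the layout the two helpers above maintain: a
 * group with leader L and siblings S1, S2 is linked as
 *
 *	ctx->counter_list:   L -> (other groups and singleton counters)
 *	L->sibling_list:     S1 -> S2
 *
 * and after list_del_counter(L, ctx) the siblings S1 and S2 are moved onto
 * ctx->counter_list, each becoming its own group leader.
 */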
97 static void
98 counter_sched_out(struct perf_counter *counter,
99                   struct perf_cpu_context *cpuctx,
100                   struct perf_counter_context *ctx)
101 {
102         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
103                 return;
104
105         counter->state = PERF_COUNTER_STATE_INACTIVE;
106         counter->hw_ops->disable(counter);
107         counter->oncpu = -1;
108
109         if (!is_software_counter(counter))
110                 cpuctx->active_oncpu--;
111         ctx->nr_active--;
112         if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
113                 cpuctx->exclusive = 0;
114 }
115
116 static void
117 group_sched_out(struct perf_counter *group_counter,
118                 struct perf_cpu_context *cpuctx,
119                 struct perf_counter_context *ctx)
120 {
121         struct perf_counter *counter;
122
123         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
124                 return;
125
126         counter_sched_out(group_counter, cpuctx, ctx);
127
128         /*
129          * Schedule out siblings (if any):
130          */
131         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
132                 counter_sched_out(counter, cpuctx, ctx);
133
134         if (group_counter->hw_event.exclusive)
135                 cpuctx->exclusive = 0;
136 }
137
138 /*
139  * Cross CPU call to remove a performance counter
140  *
141  * We disable the counter on the hardware level first. After that we
142  * remove it from the context list.
143  */
144 static void __perf_counter_remove_from_context(void *info)
145 {
146         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
147         struct perf_counter *counter = info;
148         struct perf_counter_context *ctx = counter->ctx;
149         unsigned long flags;
150         u64 perf_flags;
151
152         /*
153          * If this is a task context, we need to check whether it is
154          * the current task context of this cpu. If not, it has been
155          * scheduled out before the smp call arrived.
156          */
157         if (ctx->task && cpuctx->task_ctx != ctx)
158                 return;
159
160         curr_rq_lock_irq_save(&flags);
161         spin_lock(&ctx->lock);
162
163         counter_sched_out(counter, cpuctx, ctx);
164
165         counter->task = NULL;
166         ctx->nr_counters--;
167
168         /*
169          * Protect the list operation against NMI by disabling the
170          * counters on a global level. This is a NOP for non-NMI-based counters.
171          */
172         perf_flags = hw_perf_save_disable();
173         list_del_counter(counter, ctx);
174         hw_perf_restore(perf_flags);
175
176         if (!ctx->task) {
177                 /*
178                  * Allow more per task counters with respect to the
179                  * reservation:
180                  */
181                 cpuctx->max_pertask =
182                         min(perf_max_counters - ctx->nr_counters,
183                             perf_max_counters - perf_reserved_percpu);
184         }
185
186         spin_unlock(&ctx->lock);
187         curr_rq_unlock_irq_restore(&flags);
188 }
189
190
191 /*
192  * Remove the counter from a task's (or a CPU's) list of counters.
193  *
194  * Must be called with counter->mutex and ctx->mutex held.
195  *
196  * CPU counters are removed with an smp call. For task counters we only
197  * make the call when the task is currently running on a CPU.
198  */
199 static void perf_counter_remove_from_context(struct perf_counter *counter)
200 {
201         struct perf_counter_context *ctx = counter->ctx;
202         struct task_struct *task = ctx->task;
203
204         if (!task) {
205                 /*
206                  * Per cpu counters are removed via an smp call and
207                  * the removal is always successful.
208                  */
209                 smp_call_function_single(counter->cpu,
210                                          __perf_counter_remove_from_context,
211                                          counter, 1);
212                 return;
213         }
214
215 retry:
216         task_oncpu_function_call(task, __perf_counter_remove_from_context,
217                                  counter);
218
219         spin_lock_irq(&ctx->lock);
220         /*
221          * If the context is active, we need to retry the smp call.
222          */
223         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
224                 spin_unlock_irq(&ctx->lock);
225                 goto retry;
226         }
227
228         /*
229          * The lock prevents this context from being scheduled in, so we
230          * can remove the counter safely if the call above did not
231          * succeed.
232          */
233         if (!list_empty(&counter->list_entry)) {
234                 ctx->nr_counters--;
235                 list_del_counter(counter, ctx);
236                 counter->task = NULL;
237         }
238         spin_unlock_irq(&ctx->lock);
239 }
240
241 /*
242  * Cross CPU call to disable a performance counter
243  */
244 static void __perf_counter_disable(void *info)
245 {
246         struct perf_counter *counter = info;
247         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
248         struct perf_counter_context *ctx = counter->ctx;
249         unsigned long flags;
250
251         /*
252          * If this is a per-task counter, we need to check whether this
253          * counter's task is the current task on this cpu.
254          */
255         if (ctx->task && cpuctx->task_ctx != ctx)
256                 return;
257
258         curr_rq_lock_irq_save(&flags);
259         spin_lock(&ctx->lock);
260
261         /*
262          * If the counter is on, turn it off.
263          * If it is in error state, leave it in error state.
264          */
265         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
266                 if (counter == counter->group_leader)
267                         group_sched_out(counter, cpuctx, ctx);
268                 else
269                         counter_sched_out(counter, cpuctx, ctx);
270                 counter->state = PERF_COUNTER_STATE_OFF;
271         }
272
273         spin_unlock(&ctx->lock);
274         curr_rq_unlock_irq_restore(&flags);
275 }
276
277 /*
278  * Disable a counter.
279  */
280 static void perf_counter_disable(struct perf_counter *counter)
281 {
282         struct perf_counter_context *ctx = counter->ctx;
283         struct task_struct *task = ctx->task;
284
285         if (!task) {
286                 /*
287                  * Disable the counter on the cpu that it's on
288                  */
289                 smp_call_function_single(counter->cpu, __perf_counter_disable,
290                                          counter, 1);
291                 return;
292         }
293
294  retry:
295         task_oncpu_function_call(task, __perf_counter_disable, counter);
296
297         spin_lock_irq(&ctx->lock);
298         /*
299          * If the counter is still active, we need to retry the cross-call.
300          */
301         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
302                 spin_unlock_irq(&ctx->lock);
303                 goto retry;
304         }
305
306         /*
307          * Since we hold the lock, this context can't be scheduled
308          * in, so we can change the state safely.
309          */
310         if (counter->state == PERF_COUNTER_STATE_INACTIVE)
311                 counter->state = PERF_COUNTER_STATE_OFF;
312
313         spin_unlock_irq(&ctx->lock);
314 }
315
316 /*
317  * Disable a counter and all its children.
318  */
319 static void perf_counter_disable_family(struct perf_counter *counter)
320 {
321         struct perf_counter *child;
322
323         perf_counter_disable(counter);
324
325         /*
326          * Lock the mutex to protect the list of children
327          */
328         mutex_lock(&counter->mutex);
329         list_for_each_entry(child, &counter->child_list, child_list)
330                 perf_counter_disable(child);
331         mutex_unlock(&counter->mutex);
332 }
333
334 static int
335 counter_sched_in(struct perf_counter *counter,
336                  struct perf_cpu_context *cpuctx,
337                  struct perf_counter_context *ctx,
338                  int cpu)
339 {
340         if (counter->state <= PERF_COUNTER_STATE_OFF)
341                 return 0;
342
343         counter->state = PERF_COUNTER_STATE_ACTIVE;
344         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
345         /*
346          * The new state must be visible before we turn it on in the hardware:
347          */
348         smp_wmb();
349
350         if (counter->hw_ops->enable(counter)) {
351                 counter->state = PERF_COUNTER_STATE_INACTIVE;
352                 counter->oncpu = -1;
353                 return -EAGAIN;
354         }
355
356         if (!is_software_counter(counter))
357                 cpuctx->active_oncpu++;
358         ctx->nr_active++;
359
360         if (counter->hw_event.exclusive)
361                 cpuctx->exclusive = 1;
362
363         return 0;
364 }
365
366 /*
367  * Return 1 for a group consisting entirely of software counters,
368  * 0 if the group contains any hardware counters.
369  */
370 static int is_software_only_group(struct perf_counter *leader)
371 {
372         struct perf_counter *counter;
373
374         if (!is_software_counter(leader))
375                 return 0;
376         list_for_each_entry(counter, &leader->sibling_list, list_entry)
377                 if (!is_software_counter(counter))
378                         return 0;
379         return 1;
380 }
381
382 /*
383  * Work out whether we can put this counter group on the CPU now.
384  */
385 static int group_can_go_on(struct perf_counter *counter,
386                            struct perf_cpu_context *cpuctx,
387                            int can_add_hw)
388 {
389         /*
390          * Groups consisting entirely of software counters can always go on.
391          */
392         if (is_software_only_group(counter))
393                 return 1;
394         /*
395          * If an exclusive group is already on, no other hardware
396          * counters can go on.
397          */
398         if (cpuctx->exclusive)
399                 return 0;
400         /*
401          * If this group is exclusive and there are already
402          * counters on the CPU, it can't go on.
403          */
404         if (counter->hw_event.exclusive && cpuctx->active_oncpu)
405                 return 0;
406         /*
407          * Otherwise, try to add it if all previous groups were able
408          * to go on.
409          */
410         return can_add_hw;
411 }
412
413 /*
414  * Cross CPU call to install and enable a performance counter
415  */
416 static void __perf_install_in_context(void *info)
417 {
418         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
419         struct perf_counter *counter = info;
420         struct perf_counter_context *ctx = counter->ctx;
421         struct perf_counter *leader = counter->group_leader;
422         int cpu = smp_processor_id();
423         unsigned long flags;
424         u64 perf_flags;
425         int err;
426
427         /*
428          * If this is a task context, we need to check whether it is
429          * the current task context of this cpu. If not, it has been
430          * scheduled out before the smp call arrived.
431          */
432         if (ctx->task && cpuctx->task_ctx != ctx)
433                 return;
434
435         curr_rq_lock_irq_save(&flags);
436         spin_lock(&ctx->lock);
437
438         /*
439          * Protect the list operation against NMI by disabling the
440          * counters on a global level. This is a NOP for non-NMI-based counters.
441          */
442         perf_flags = hw_perf_save_disable();
443
444         list_add_counter(counter, ctx);
445         ctx->nr_counters++;
446         counter->prev_state = PERF_COUNTER_STATE_OFF;
447
448         /*
449          * Don't put the counter on if it is disabled or if
450          * it is in a group and the group isn't on.
451          */
452         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
453             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
454                 goto unlock;
455
456         /*
457          * An exclusive counter can't go on if there are already active
458          * hardware counters, and no hardware counter can go on if there
459          * is already an exclusive counter on.
460          */
461         if (!group_can_go_on(counter, cpuctx, 1))
462                 err = -EEXIST;
463         else
464                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
465
466         if (err) {
467                 /*
468                  * This counter couldn't go on.  If it is in a group
469                  * then we have to pull the whole group off.
470                  * If the counter group is pinned then put it in error state.
471                  */
472                 if (leader != counter)
473                         group_sched_out(leader, cpuctx, ctx);
474                 if (leader->hw_event.pinned)
475                         leader->state = PERF_COUNTER_STATE_ERROR;
476         }
477
478         if (!err && !ctx->task && cpuctx->max_pertask)
479                 cpuctx->max_pertask--;
480
481  unlock:
482         hw_perf_restore(perf_flags);
483
484         spin_unlock(&ctx->lock);
485         curr_rq_unlock_irq_restore(&flags);
486 }
487
488 /*
489  * Attach a performance counter to a context
490  *
491  * First we add the counter to the list with the hardware enable bit
492  * in counter->hw_config cleared.
493  *
494  * If the counter is attached to a task which is on a CPU, we use an smp
495  * call to enable it in the task context. The task might have been
496  * scheduled away, but we check this in the smp call again.
497  *
498  * Must be called with ctx->mutex held.
499  */
500 static void
501 perf_install_in_context(struct perf_counter_context *ctx,
502                         struct perf_counter *counter,
503                         int cpu)
504 {
505         struct task_struct *task = ctx->task;
506
507         if (!task) {
508                 /*
509                  * Per cpu counters are installed via an smp call and
510                  * the install is always successful.
511                  */
512                 smp_call_function_single(cpu, __perf_install_in_context,
513                                          counter, 1);
514                 return;
515         }
516
517         counter->task = task;
518 retry:
519         task_oncpu_function_call(task, __perf_install_in_context,
520                                  counter);
521
522         spin_lock_irq(&ctx->lock);
523         /*
524          * If the context is active, we need to retry the smp call.
525          */
526         if (ctx->is_active && list_empty(&counter->list_entry)) {
527                 spin_unlock_irq(&ctx->lock);
528                 goto retry;
529         }
530
531         /*
532          * The lock prevents this context from being scheduled in, so we
533          * can add the counter safely if the call above did not
534          * succeed.
535          */
536         if (list_empty(&counter->list_entry)) {
537                 list_add_counter(counter, ctx);
538                 ctx->nr_counters++;
539         }
540         spin_unlock_irq(&ctx->lock);
541 }
542
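/*
 * Illustrative usage sketch, not part of this file: the code that creates
 * a counter is expected to attach it roughly like this (error handling and
 * counter allocation omitted):
 *
 *	struct perf_counter_context *ctx;
 *	struct perf_counter *counter;
 *
 *	ctx = find_get_context(pid, cpu);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	(allocate the counter and set counter->ctx = ctx here)
 *
 *	mutex_lock(&ctx->mutex);
 *	perf_install_in_context(ctx, counter, cpu);
 *	mutex_unlock(&ctx->mutex);
 */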
543 /*
544  * Cross CPU call to enable a performance counter
545  */
546 static void __perf_counter_enable(void *info)
547 {
548         struct perf_counter *counter = info;
549         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
550         struct perf_counter_context *ctx = counter->ctx;
551         struct perf_counter *leader = counter->group_leader;
552         unsigned long flags;
553         int err;
554
555         /*
556          * If this is a per-task counter, we need to check whether this
557          * counter's task is the current task on this cpu.
558          */
559         if (ctx->task && cpuctx->task_ctx != ctx)
560                 return;
561
562         curr_rq_lock_irq_save(&flags);
563         spin_lock(&ctx->lock);
564
565         counter->prev_state = counter->state;
566         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
567                 goto unlock;
568         counter->state = PERF_COUNTER_STATE_INACTIVE;
569
570         /*
571          * If the counter is in a group and isn't the group leader,
572          * then don't put it on unless the group is on.
573          */
574         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
575                 goto unlock;
576
577         if (!group_can_go_on(counter, cpuctx, 1))
578                 err = -EEXIST;
579         else
580                 err = counter_sched_in(counter, cpuctx, ctx,
581                                        smp_processor_id());
582
583         if (err) {
584                 /*
585                  * If this counter can't go on and it's part of a
586                  * group, then the whole group has to come off.
587                  */
588                 if (leader != counter)
589                         group_sched_out(leader, cpuctx, ctx);
590                 if (leader->hw_event.pinned)
591                         leader->state = PERF_COUNTER_STATE_ERROR;
592         }
593
594  unlock:
595         spin_unlock(&ctx->lock);
596         curr_rq_unlock_irq_restore(&flags);
597 }
598
599 /*
600  * Enable a counter.
601  */
602 static void perf_counter_enable(struct perf_counter *counter)
603 {
604         struct perf_counter_context *ctx = counter->ctx;
605         struct task_struct *task = ctx->task;
606
607         if (!task) {
608                 /*
609                  * Enable the counter on the cpu that it's on
610                  */
611                 smp_call_function_single(counter->cpu, __perf_counter_enable,
612                                          counter, 1);
613                 return;
614         }
615
616         spin_lock_irq(&ctx->lock);
617         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
618                 goto out;
619
620         /*
621          * If the counter is in error state, clear that first.
622          * That way, if we see the counter in error state below, we
623          * know that it has gone back into error state, as distinct
624          * from the task having been scheduled away before the
625          * cross-call arrived.
626          */
627         if (counter->state == PERF_COUNTER_STATE_ERROR)
628                 counter->state = PERF_COUNTER_STATE_OFF;
629
630  retry:
631         spin_unlock_irq(&ctx->lock);
632         task_oncpu_function_call(task, __perf_counter_enable, counter);
633
634         spin_lock_irq(&ctx->lock);
635
636         /*
637          * If the context is active and the counter is still off,
638          * we need to retry the cross-call.
639          */
640         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
641                 goto retry;
642
643         /*
644          * Since we hold the lock, this context can't be scheduled
645          * in, so we can change the state safely.
646          */
647         if (counter->state == PERF_COUNTER_STATE_OFF)
648                 counter->state = PERF_COUNTER_STATE_INACTIVE;
649  out:
650         spin_unlock_irq(&ctx->lock);
651 }
652
653 /*
654  * Enable a counter and all its children.
655  */
656 static void perf_counter_enable_family(struct perf_counter *counter)
657 {
658         struct perf_counter *child;
659
660         perf_counter_enable(counter);
661
662         /*
663          * Lock the mutex to protect the list of children
664          */
665         mutex_lock(&counter->mutex);
666         list_for_each_entry(child, &counter->child_list, child_list)
667                 perf_counter_enable(child);
668         mutex_unlock(&counter->mutex);
669 }
670
671 void __perf_counter_sched_out(struct perf_counter_context *ctx,
672                               struct perf_cpu_context *cpuctx)
673 {
674         struct perf_counter *counter;
675         u64 flags;
676
677         spin_lock(&ctx->lock);
678         ctx->is_active = 0;
679         if (likely(!ctx->nr_counters))
680                 goto out;
681
682         flags = hw_perf_save_disable();
683         if (ctx->nr_active) {
684                 list_for_each_entry(counter, &ctx->counter_list, list_entry)
685                         group_sched_out(counter, cpuctx, ctx);
686         }
687         hw_perf_restore(flags);
688  out:
689         spin_unlock(&ctx->lock);
690 }
691
692 /*
693  * Called from the scheduler to remove the counters of the current task,
694  * with interrupts disabled.
695  *
696  * We stop each counter and update the counter value in counter->count.
697  *
698  * This does not protect us against NMI, but disable()
699  * sets the disabled bit in the control field of the counter _before_
700  * accessing the counter control register. If an NMI hits, then it will
701  * not restart the counter.
702  */
703 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
704 {
705         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
706         struct perf_counter_context *ctx = &task->perf_counter_ctx;
707
708         if (likely(!cpuctx->task_ctx))
709                 return;
710
711         __perf_counter_sched_out(ctx, cpuctx);
712
713         cpuctx->task_ctx = NULL;
714 }
715
716 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
717 {
718         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
719 }
720
721 static int
722 group_sched_in(struct perf_counter *group_counter,
723                struct perf_cpu_context *cpuctx,
724                struct perf_counter_context *ctx,
725                int cpu)
726 {
727         struct perf_counter *counter, *partial_group;
728         int ret;
729
730         if (group_counter->state == PERF_COUNTER_STATE_OFF)
731                 return 0;
732
733         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
734         if (ret)
735                 return ret < 0 ? ret : 0;
736
737         group_counter->prev_state = group_counter->state;
738         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
739                 return -EAGAIN;
740
741         /*
742          * Schedule in siblings as one group (if any):
743          */
744         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
745                 counter->prev_state = counter->state;
746                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
747                         partial_group = counter;
748                         goto group_error;
749                 }
750         }
751
752         return 0;
753
754 group_error:
755         /*
756          * Groups can be scheduled in as one unit only, so undo any
757          * partial group before returning:
758          */
759         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
760                 if (counter == partial_group)
761                         break;
762                 counter_sched_out(counter, cpuctx, ctx);
763         }
764         counter_sched_out(group_counter, cpuctx, ctx);
765
766         return -EAGAIN;
767 }
768
769 static void
770 __perf_counter_sched_in(struct perf_counter_context *ctx,
771                         struct perf_cpu_context *cpuctx, int cpu)
772 {
773         struct perf_counter *counter;
774         u64 flags;
775         int can_add_hw = 1;
776
777         spin_lock(&ctx->lock);
778         ctx->is_active = 1;
779         if (likely(!ctx->nr_counters))
780                 goto out;
781
782         flags = hw_perf_save_disable();
783
784         /*
785          * First go through the list and put on any pinned groups
786          * in order to give them the best chance of going on.
787          */
788         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
789                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
790                     !counter->hw_event.pinned)
791                         continue;
792                 if (counter->cpu != -1 && counter->cpu != cpu)
793                         continue;
794
795                 if (group_can_go_on(counter, cpuctx, 1))
796                         group_sched_in(counter, cpuctx, ctx, cpu);
797
798                 /*
799                  * If this pinned group hasn't been scheduled,
800                  * put it in error state.
801                  */
802                 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
803                         counter->state = PERF_COUNTER_STATE_ERROR;
804         }
805
806         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
807                 /*
808                  * Ignore counters in OFF or ERROR state, and
809                  * ignore pinned counters since we did them already.
810                  */
811                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
812                     counter->hw_event.pinned)
813                         continue;
814
815                 /*
816                  * Listen to the 'cpu' scheduling filter constraint
817                  * of counters:
818                  */
819                 if (counter->cpu != -1 && counter->cpu != cpu)
820                         continue;
821
822                 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
823                         if (group_sched_in(counter, cpuctx, ctx, cpu))
824                                 can_add_hw = 0;
825                 }
826         }
827         hw_perf_restore(flags);
828  out:
829         spin_unlock(&ctx->lock);
830 }
831
832 /*
833  * Called from the scheduler to add the counters of the current task,
834  * with interrupts disabled.
835  *
836  * We restore the counter value and then enable it.
837  *
838  * This does not protect us against NMI, but enable()
839  * sets the enabled bit in the control field of the counter _before_
840  * accessing the counter control register. If an NMI hits, then it will
841  * keep the counter running.
842  */
843 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
844 {
845         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
846         struct perf_counter_context *ctx = &task->perf_counter_ctx;
847
848         __perf_counter_sched_in(ctx, cpuctx, cpu);
849         cpuctx->task_ctx = ctx;
850 }
851
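/*
 * Illustrative sketch, not part of this file: the core scheduler is
 * expected to bracket each context switch with these hooks, with
 * interrupts disabled, roughly:
 *
 *	perf_counter_task_sched_out(prev, cpu);
 *	(switch from 'prev' to 'next')
 *	perf_counter_task_sched_in(next, cpu);
 *
 * perf_counter_task_tick() (below) is the periodic hook that round-robins
 * counters which could not all be scheduled onto the PMU at once.
 */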
852 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
853 {
854         struct perf_counter_context *ctx = &cpuctx->ctx;
855
856         __perf_counter_sched_in(ctx, cpuctx, cpu);
857 }
858
859 int perf_counter_task_disable(void)
860 {
861         struct task_struct *curr = current;
862         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
863         struct perf_counter *counter;
864         unsigned long flags;
865         u64 perf_flags;
866         int cpu;
867
868         if (likely(!ctx->nr_counters))
869                 return 0;
870
871         curr_rq_lock_irq_save(&flags);
872         cpu = smp_processor_id();
873
874         /* force the update of the task clock: */
875         __task_delta_exec(curr, 1);
876
877         perf_counter_task_sched_out(curr, cpu);
878
879         spin_lock(&ctx->lock);
880
881         /*
882          * Disable all the counters:
883          */
884         perf_flags = hw_perf_save_disable();
885
886         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
887                 if (counter->state != PERF_COUNTER_STATE_ERROR)
888                         counter->state = PERF_COUNTER_STATE_OFF;
889         }
890
891         hw_perf_restore(perf_flags);
892
893         spin_unlock(&ctx->lock);
894
895         curr_rq_unlock_irq_restore(&flags);
896
897         return 0;
898 }
899
900 int perf_counter_task_enable(void)
901 {
902         struct task_struct *curr = current;
903         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
904         struct perf_counter *counter;
905         unsigned long flags;
906         u64 perf_flags;
907         int cpu;
908
909         if (likely(!ctx->nr_counters))
910                 return 0;
911
912         curr_rq_lock_irq_save(&flags);
913         cpu = smp_processor_id();
914
915         /* force the update of the task clock: */
916         __task_delta_exec(curr, 1);
917
918         perf_counter_task_sched_out(curr, cpu);
919
920         spin_lock(&ctx->lock);
921
922         /*
923          * Enable all the counters:
924          */
925         perf_flags = hw_perf_save_disable();
926
927         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
928                 if (counter->state > PERF_COUNTER_STATE_OFF)
929                         continue;
930                 counter->state = PERF_COUNTER_STATE_INACTIVE;
931                 counter->hw_event.disabled = 0;
932         }
933         hw_perf_restore(perf_flags);
934
935         spin_unlock(&ctx->lock);
936
937         perf_counter_task_sched_in(curr, cpu);
938
939         curr_rq_unlock_irq_restore(&flags);
940
941         return 0;
942 }
943
944 /*
945  * Round-robin a context's counters:
946  */
947 static void rotate_ctx(struct perf_counter_context *ctx)
948 {
949         struct perf_counter *counter;
950         u64 perf_flags;
951
952         if (!ctx->nr_counters)
953                 return;
954
955         spin_lock(&ctx->lock);
956         /*
957          * Rotate the first entry last (works just fine for group counters too):
958          */
959         perf_flags = hw_perf_save_disable();
960         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
961                 list_move_tail(&counter->list_entry, &ctx->counter_list);
962                 break;
963         }
964         hw_perf_restore(perf_flags);
965
966         spin_unlock(&ctx->lock);
967 }
968
969 void perf_counter_task_tick(struct task_struct *curr, int cpu)
970 {
971         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
972         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
973         const int rotate_percpu = 0;
974
975         if (rotate_percpu)
976                 perf_counter_cpu_sched_out(cpuctx);
977         perf_counter_task_sched_out(curr, cpu);
978
979         if (rotate_percpu)
980                 rotate_ctx(&cpuctx->ctx);
981         rotate_ctx(ctx);
982
983         if (rotate_percpu)
984                 perf_counter_cpu_sched_in(cpuctx, cpu);
985         perf_counter_task_sched_in(curr, cpu);
986 }
987
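/*
 * Worked example of the round-robin above (illustrative): with three
 * counter groups A, B and C in a context, none pinned or exclusive, and
 * PMU room for only two of them, successive ticks rotate the list
 * A,B,C -> B,C,A -> C,A,B, so each group ends up scheduled onto the PMU
 * for two out of every three tick periods.
 */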
988 /*
989  * Cross CPU call to read the hardware counter
990  */
991 static void __read(void *info)
992 {
993         struct perf_counter *counter = info;
994         unsigned long flags;
995
996         curr_rq_lock_irq_save(&flags);
997         counter->hw_ops->read(counter);
998         curr_rq_unlock_irq_restore(&flags);
999 }
1000
1001 static u64 perf_counter_read(struct perf_counter *counter)
1002 {
1003         /*
1004          * If counter is enabled and currently active on a CPU, update the
1005          * value in the counter structure:
1006          */
1007         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1008                 smp_call_function_single(counter->oncpu,
1009                                          __read, counter, 1);
1010         }
1011
1012         return atomic64_read(&counter->count);
1013 }
1014
1015 /*
1016  * Cross CPU call to switch performance data pointers
1017  */
1018 static void __perf_switch_irq_data(void *info)
1019 {
1020         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1021         struct perf_counter *counter = info;
1022         struct perf_counter_context *ctx = counter->ctx;
1023         struct perf_data *oldirqdata = counter->irqdata;
1024
1025         /*
1026          * If this is a task context, we need to check whether it is
1027          * the current task context of this cpu. If not, it has been
1028          * scheduled out before the smp call arrived.
1029          */
1030         if (ctx->task) {
1031                 if (cpuctx->task_ctx != ctx)
1032                         return;
1033                 spin_lock(&ctx->lock);
1034         }
1035
1036         /* Change the pointer in an NMI-safe manner */
1037         atomic_long_set((atomic_long_t *)&counter->irqdata,
1038                         (unsigned long) counter->usrdata);
1039         counter->usrdata = oldirqdata;
1040
1041         if (ctx->task)
1042                 spin_unlock(&ctx->lock);
1043 }
1044
1045 static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
1046 {
1047         struct perf_counter_context *ctx = counter->ctx;
1048         struct perf_data *oldirqdata = counter->irqdata;
1049         struct task_struct *task = ctx->task;
1050
1051         if (!task) {
1052                 smp_call_function_single(counter->cpu,
1053                                          __perf_switch_irq_data,
1054                                          counter, 1);
1055                 return counter->usrdata;
1056         }
1057
1058 retry:
1059         spin_lock_irq(&ctx->lock);
1060         if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
1061                 counter->irqdata = counter->usrdata;
1062                 counter->usrdata = oldirqdata;
1063                 spin_unlock_irq(&ctx->lock);
1064                 return oldirqdata;
1065         }
1066         spin_unlock_irq(&ctx->lock);
1067         task_oncpu_function_call(task, __perf_switch_irq_data, counter);
1068         /* Might have failed, because task was scheduled out */
1069         if (counter->irqdata == oldirqdata)
1070                 goto retry;
1071
1072         return counter->usrdata;
1073 }
1074
1075 static void put_context(struct perf_counter_context *ctx)
1076 {
1077         if (ctx->task)
1078                 put_task_struct(ctx->task);
1079 }
1080
1081 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1082 {
1083         struct perf_cpu_context *cpuctx;
1084         struct perf_counter_context *ctx;
1085         struct task_struct *task;
1086
1087         /*
1088          * If cpu is not a wildcard then this is a percpu counter:
1089          */
1090         if (cpu != -1) {
1091                 /* Must be root to operate on a CPU counter: */
1092                 if (!capable(CAP_SYS_ADMIN))
1093                         return ERR_PTR(-EACCES);
1094
1095                 if (cpu < 0 || cpu > num_possible_cpus())
1096                         return ERR_PTR(-EINVAL);
1097
1098                 /*
1099                  * We could be clever and allow attaching a counter to an
1100                  * offline CPU and activate it when the CPU comes up, but
1101                  * that's for later.
1102                  */
1103                 if (!cpu_isset(cpu, cpu_online_map))
1104                         return ERR_PTR(-ENODEV);
1105
1106                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1107                 ctx = &cpuctx->ctx;
1108
1109                 return ctx;
1110         }
1111
1112         rcu_read_lock();
1113         if (!pid)
1114                 task = current;
1115         else
1116                 task = find_task_by_vpid(pid);
1117         if (task)
1118                 get_task_struct(task);
1119         rcu_read_unlock();
1120
1121         if (!task)
1122                 return ERR_PTR(-ESRCH);
1123
1124         ctx = &task->perf_counter_ctx;
1125         ctx->task = task;
1126
1127         /* Reuse ptrace permission checks for now. */
1128         if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1129                 put_context(ctx);
1130                 return ERR_PTR(-EACCES);
1131         }
1132
1133         return ctx;
1134 }
1135
1136 /*
1137  * Called when the last reference to the file is gone.
1138  */
1139 static int perf_release(struct inode *inode, struct file *file)
1140 {
1141         struct perf_counter *counter = file->private_data;
1142         struct perf_counter_context *ctx = counter->ctx;
1143
1144         file->private_data = NULL;
1145
1146         mutex_lock(&ctx->mutex);
1147         mutex_lock(&counter->mutex);
1148
1149         perf_counter_remove_from_context(counter);
1150
1151         mutex_unlock(&counter->mutex);
1152         mutex_unlock(&ctx->mutex);
1153
1154         kfree(counter);
1155         put_context(ctx);
1156
1157         return 0;
1158 }
1159
1160 /*
1161  * Read the performance counter - simple non-blocking version for now
1162  */
1163 static ssize_t
1164 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1165 {
1166         u64 cntval;
1167
1168         if (count != sizeof(cntval))
1169                 return -EINVAL;
1170
1171         /*
1172          * Return end-of-file for a read on a counter that is in
1173          * error state (i.e. because it was pinned but it couldn't be
1174  * scheduled onto the CPU at some point).
1175          */
1176         if (counter->state == PERF_COUNTER_STATE_ERROR)
1177                 return 0;
1178
1179         mutex_lock(&counter->mutex);
1180         cntval = perf_counter_read(counter);
1181         mutex_unlock(&counter->mutex);
1182
1183         return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
1184 }
1185
1186 static ssize_t
1187 perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
1188 {
1189         if (!usrdata->len)
1190                 return 0;
1191
1192         count = min(count, (size_t)usrdata->len);
1193         if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
1194                 return -EFAULT;
1195
1196         /* Adjust the counters */
1197         usrdata->len -= count;
1198         if (!usrdata->len)
1199                 usrdata->rd_idx = 0;
1200         else
1201                 usrdata->rd_idx += count;
1202
1203         return count;
1204 }
1205
1206 static ssize_t
1207 perf_read_irq_data(struct perf_counter  *counter,
1208                    char __user          *buf,
1209                    size_t               count,
1210                    int                  nonblocking)
1211 {
1212         struct perf_data *irqdata, *usrdata;
1213         DECLARE_WAITQUEUE(wait, current);
1214         ssize_t res, res2;
1215
1216         irqdata = counter->irqdata;
1217         usrdata = counter->usrdata;
1218
1219         if (usrdata->len + irqdata->len >= count)
1220                 goto read_pending;
1221
1222         if (nonblocking)
1223                 return -EAGAIN;
1224
1225         spin_lock_irq(&counter->waitq.lock);
1226         __add_wait_queue(&counter->waitq, &wait);
1227         for (;;) {
1228                 set_current_state(TASK_INTERRUPTIBLE);
1229                 if (usrdata->len + irqdata->len >= count)
1230                         break;
1231
1232                 if (signal_pending(current))
1233                         break;
1234
1235                 if (counter->state == PERF_COUNTER_STATE_ERROR)
1236                         break;
1237
1238                 spin_unlock_irq(&counter->waitq.lock);
1239                 schedule();
1240                 spin_lock_irq(&counter->waitq.lock);
1241         }
1242         __remove_wait_queue(&counter->waitq, &wait);
1243         __set_current_state(TASK_RUNNING);
1244         spin_unlock_irq(&counter->waitq.lock);
1245
1246         if (usrdata->len + irqdata->len < count &&
1247             counter->state != PERF_COUNTER_STATE_ERROR)
1248                 return -ERESTARTSYS;
1249 read_pending:
1250         mutex_lock(&counter->mutex);
1251
1252         /* Drain pending data first: */
1253         res = perf_copy_usrdata(usrdata, buf, count);
1254         if (res < 0 || res == count)
1255                 goto out;
1256
1257         /* Switch irq buffer: */
1258         usrdata = perf_switch_irq_data(counter);
1259         res2 = perf_copy_usrdata(usrdata, buf + res, count - res);
1260         if (res2 < 0) {
1261                 if (!res)
1262                         res = -EFAULT;
1263         } else {
1264                 res += res2;
1265         }
1266 out:
1267         mutex_unlock(&counter->mutex);
1268
1269         return res;
1270 }
1271
1272 static ssize_t
1273 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1274 {
1275         struct perf_counter *counter = file->private_data;
1276
1277         switch (counter->hw_event.record_type) {
1278         case PERF_RECORD_SIMPLE:
1279                 return perf_read_hw(counter, buf, count);
1280
1281         case PERF_RECORD_IRQ:
1282         case PERF_RECORD_GROUP:
1283                 return perf_read_irq_data(counter, buf, count,
1284                                           file->f_flags & O_NONBLOCK);
1285         }
1286         return -EINVAL;
1287 }
1288
1289 static unsigned int perf_poll(struct file *file, poll_table *wait)
1290 {
1291         struct perf_counter *counter = file->private_data;
1292         unsigned int events = 0;
1293         unsigned long flags;
1294
1295         poll_wait(file, &counter->waitq, wait);
1296
1297         spin_lock_irqsave(&counter->waitq.lock, flags);
1298         if (counter->usrdata->len || counter->irqdata->len)
1299                 events |= POLLIN;
1300         spin_unlock_irqrestore(&counter->waitq.lock, flags);
1301
1302         return events;
1303 }
1304
1305 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1306 {
1307         struct perf_counter *counter = file->private_data;
1308         int err = 0;
1309
1310         switch (cmd) {
1311         case PERF_COUNTER_IOC_ENABLE:
1312                 perf_counter_enable_family(counter);
1313                 break;
1314         case PERF_COUNTER_IOC_DISABLE:
1315                 perf_counter_disable_family(counter);
1316                 break;
1317         default:
1318                 err = -ENOTTY;
1319         }
1320         return err;
1321 }
1322
1323 static const struct file_operations perf_fops = {
1324         .release                = perf_release,
1325         .read                   = perf_read,
1326         .poll                   = perf_poll,
1327         .unlocked_ioctl         = perf_ioctl,
1328         .compat_ioctl           = perf_ioctl,
1329 };
1330
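/*
 * Illustrative user-space sketch, not part of this file, assuming 'fd'
 * refers to a counter opened with record_type PERF_RECORD_SIMPLE:
 *
 *	unsigned long long value;
 *
 *	if (read(fd, &value, sizeof(value)) == sizeof(value))
 *		printf("count: %llu\n", value);
 *
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE);	(stop the counter and its children)
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE);	(start them again)
 *
 * PERF_RECORD_IRQ/PERF_RECORD_GROUP counters are read the same way but may
 * block (or return -EAGAIN with O_NONBLOCK) until sampled data is pending,
 * and poll() reports POLLIN once data is available.
 */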
1331 /*
1332  * Generic software counter infrastructure
1333  */
1334
1335 static void perf_swcounter_update(struct perf_counter *counter)
1336 {
1337         struct hw_perf_counter *hwc = &counter->hw;
1338         u64 prev, now;
1339         s64 delta;
1340
1341 again:
1342         prev = atomic64_read(&hwc->prev_count);
1343         now = atomic64_read(&hwc->count);
1344         if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
1345                 goto again;
1346
1347         delta = now - prev;
1348
1349         atomic64_add(delta, &counter->count);
1350         atomic64_sub(delta, &hwc->period_left);
1351 }
1352
1353 static void perf_swcounter_set_period(struct perf_counter *counter)
1354 {
1355         struct hw_perf_counter *hwc = &counter->hw;
1356         s64 left = atomic64_read(&hwc->period_left);
1357         s64 period = hwc->irq_period;
1358
1359         if (unlikely(left <= -period)) {
1360                 left = period;
1361                 atomic64_set(&hwc->period_left, left);
1362         }
1363
1364         if (unlikely(left <= 0)) {
1365                 left += period;
1366                 atomic64_add(period, &hwc->period_left);
1367         }
1368
1369         atomic64_set(&hwc->prev_count, -left);
1370         atomic64_set(&hwc->count, -left);
1371 }
1372
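/*
 * Worked example of the period logic above (illustrative): with
 * hwc->irq_period = 100, perf_swcounter_set_period() primes hwc->count and
 * hwc->prev_count to -100, so the counter goes non-negative - and thus
 * triggers an interrupt - after 100 events.  If 103 events have been
 * counted by the time the overflow is serviced, perf_swcounter_update()
 * adds the delta of 103 to counter->count and leaves period_left at -3;
 * the next perf_swcounter_set_period() then primes the counter to -97,
 * keeping the long-run average period at 100 events.
 */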
1373 static void perf_swcounter_save_and_restart(struct perf_counter *counter)
1374 {
1375         perf_swcounter_update(counter);
1376         perf_swcounter_set_period(counter);
1377 }
1378
1379 static void perf_swcounter_store_irq(struct perf_counter *counter, u64 data)
1380 {
1381         struct perf_data *irqdata = counter->irqdata;
1382
1383         if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
1384                 irqdata->overrun++;
1385         } else {
1386                 u64 *p = (u64 *) &irqdata->data[irqdata->len];
1387
1388                 *p = data;
1389                 irqdata->len += sizeof(u64);
1390         }
1391 }
1392
1393 static void perf_swcounter_handle_group(struct perf_counter *sibling)
1394 {
1395         struct perf_counter *counter, *group_leader = sibling->group_leader;
1396
1397         list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
1398                 perf_swcounter_update(counter);
1399                 perf_swcounter_store_irq(sibling, counter->hw_event.type);
1400                 perf_swcounter_store_irq(sibling, atomic64_read(&counter->count));
1401         }
1402 }
1403
1404 static void perf_swcounter_interrupt(struct perf_counter *counter,
1405                                      int nmi, struct pt_regs *regs)
1406 {
1407         perf_swcounter_save_and_restart(counter);
1408
1409         switch (counter->hw_event.record_type) {
1410         case PERF_RECORD_SIMPLE:
1411                 break;
1412
1413         case PERF_RECORD_IRQ:
1414                 perf_swcounter_store_irq(counter, instruction_pointer(regs));
1415                 break;
1416
1417         case PERF_RECORD_GROUP:
1418                 perf_swcounter_handle_group(counter);
1419                 break;
1420         }
1421
1422         if (nmi) {
1423                 counter->wakeup_pending = 1;
1424                 set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
1425         } else
1426                 wake_up(&counter->waitq);
1427 }
1428
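/*
 * Resulting irqdata stream layout (illustrative summary): a
 * PERF_RECORD_IRQ counter appends one u64 per overflow, the interrupted
 * instruction pointer, while a PERF_RECORD_GROUP counter appends two u64s
 * per group member: the member's hw_event.type followed by its current
 * count.  User space consumes this stream via read() on the counter fd.
 */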
1429 static int perf_swcounter_match(struct perf_counter *counter,
1430                                 enum hw_event_types event,
1431                                 struct pt_regs *regs)
1432 {
1433         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1434                 return 0;
1435
1436         if (counter->hw_event.raw)
1437                 return 0;
1438
1439         if (counter->hw_event.type != event)
1440                 return 0;
1441
1442         if (counter->hw_event.exclude_user && user_mode(regs))
1443                 return 0;
1444
1445         if (counter->hw_event.exclude_kernel && !user_mode(regs))
1446                 return 0;
1447
1448         return 1;
1449 }
1450
1451 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
1452                                      enum hw_event_types event, u64 nr,
1453                                      int nmi, struct pt_regs *regs)
1454 {
1455         struct perf_counter *counter;
1456         unsigned long flags;
1457         int neg;
1458
1459         if (list_empty(&ctx->counter_list))
1460                 return;
1461
1462         spin_lock_irqsave(&ctx->lock, flags);
1463
1464         /*
1465          * XXX: make counter_list RCU safe
1466          */
1467         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1468                 if (perf_swcounter_match(counter, event, regs)) {
1469                         neg = atomic64_add_negative(nr, &counter->hw.count);
1470                         if (counter->hw.irq_period && !neg)
1471                                 perf_swcounter_interrupt(counter, nmi, regs);
1472                 }
1473         }
1474
1475         spin_unlock_irqrestore(&ctx->lock, flags);
1476 }
1477
1478 void perf_swcounter_event(enum hw_event_types event, u64 nr,
1479                           int nmi, struct pt_regs *regs)
1480 {
1481         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
1482
1483         perf_swcounter_ctx_event(&cpuctx->ctx, event, nr, nmi, regs);
1484         if (cpuctx->task_ctx)
1485                 perf_swcounter_ctx_event(cpuctx->task_ctx, event, nr, nmi, regs);
1486
1487         put_cpu_var(perf_cpu_context);
1488 }
1489
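/*
 * Illustrative sketch, not part of this file: callers in the fault path
 * are expected to report page faults roughly like this, with 'regs' being
 * the faulting register state and 'major' a hypothetical flag set from the
 * fault result:
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
 *	if (major)
 *		perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0, regs);
 *	else
 *		perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0, regs);
 *
 * Context switches and CPU migrations, by contrast, are counted via the
 * dedicated hw_ops further down rather than through this hook.
 */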
1490 static void perf_swcounter_read(struct perf_counter *counter)
1491 {
1492         perf_swcounter_update(counter);
1493 }
1494
1495 static int perf_swcounter_enable(struct perf_counter *counter)
1496 {
1497         perf_swcounter_set_period(counter);
1498         return 0;
1499 }
1500
1501 static void perf_swcounter_disable(struct perf_counter *counter)
1502 {
1503         perf_swcounter_update(counter);
1504 }
1505
1506 static const struct hw_perf_counter_ops perf_ops_generic = {
1507         .enable         = perf_swcounter_enable,
1508         .disable        = perf_swcounter_disable,
1509         .read           = perf_swcounter_read,
1510 };
1511
1512 /*
1513  * Software counter: cpu wall time clock
1514  */
1515
1516 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
1517 {
1518         int cpu = raw_smp_processor_id();
1519
1520         atomic64_set(&counter->hw.prev_count, cpu_clock(cpu));
1521         return 0;
1522 }
1523
1524 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
1525 {
1526         int cpu = raw_smp_processor_id();
1527         s64 prev;
1528         u64 now;
1529
1530         now = cpu_clock(cpu);
1531         prev = atomic64_read(&counter->hw.prev_count);
1532         atomic64_set(&counter->hw.prev_count, now);
1533         atomic64_add(now - prev, &counter->count);
1534 }
1535
1536 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
1537 {
1538         cpu_clock_perf_counter_update(counter);
1539 }
1540
1541 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
1542 {
1543         cpu_clock_perf_counter_update(counter);
1544 }
1545
1546 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
1547         .enable         = cpu_clock_perf_counter_enable,
1548         .disable        = cpu_clock_perf_counter_disable,
1549         .read           = cpu_clock_perf_counter_read,
1550 };
1551
1552 /*
1553  * Software counter: task time clock
1554  */
1555
1556 /*
1557  * Called from within the scheduler:
1558  */
1559 static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
1560 {
1561         struct task_struct *curr = counter->task;
1562         u64 delta;
1563
1564         delta = __task_delta_exec(curr, update);
1565
1566         return curr->se.sum_exec_runtime + delta;
1567 }
1568
1569 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
1570 {
1571         u64 prev;
1572         s64 delta;
1573
1574         prev = atomic64_read(&counter->hw.prev_count);
1575
1576         atomic64_set(&counter->hw.prev_count, now);
1577
1578         delta = now - prev;
1579
1580         atomic64_add(delta, &counter->count);
1581 }
1582
1583 static void task_clock_perf_counter_read(struct perf_counter *counter)
1584 {
1585         u64 now = task_clock_perf_counter_val(counter, 1);
1586
1587         task_clock_perf_counter_update(counter, now);
1588 }
1589
1590 static int task_clock_perf_counter_enable(struct perf_counter *counter)
1591 {
1592         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1593                 atomic64_set(&counter->hw.prev_count,
1594                              task_clock_perf_counter_val(counter, 0));
1595
1596         return 0;
1597 }
1598
1599 static void task_clock_perf_counter_disable(struct perf_counter *counter)
1600 {
1601         u64 now = task_clock_perf_counter_val(counter, 0);
1602
1603         task_clock_perf_counter_update(counter, now);
1604 }
1605
1606 static const struct hw_perf_counter_ops perf_ops_task_clock = {
1607         .enable         = task_clock_perf_counter_enable,
1608         .disable        = task_clock_perf_counter_disable,
1609         .read           = task_clock_perf_counter_read,
1610 };
1611
1612 /*
1613  * Software counter: context switches
1614  */
1615
1616 static u64 get_context_switches(struct perf_counter *counter)
1617 {
1618         struct task_struct *curr = counter->ctx->task;
1619
1620         if (curr)
1621                 return curr->nvcsw + curr->nivcsw;
1622         return cpu_nr_switches(smp_processor_id());
1623 }
1624
1625 static void context_switches_perf_counter_update(struct perf_counter *counter)
1626 {
1627         u64 prev, now;
1628         s64 delta;
1629
1630         prev = atomic64_read(&counter->hw.prev_count);
1631         now = get_context_switches(counter);
1632
1633         atomic64_set(&counter->hw.prev_count, now);
1634
1635         delta = now - prev;
1636
1637         atomic64_add(delta, &counter->count);
1638 }
1639
1640 static void context_switches_perf_counter_read(struct perf_counter *counter)
1641 {
1642         context_switches_perf_counter_update(counter);
1643 }
1644
1645 static int context_switches_perf_counter_enable(struct perf_counter *counter)
1646 {
1647         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1648                 atomic64_set(&counter->hw.prev_count,
1649                              get_context_switches(counter));
1650         return 0;
1651 }
1652
1653 static void context_switches_perf_counter_disable(struct perf_counter *counter)
1654 {
1655         context_switches_perf_counter_update(counter);
1656 }
1657
1658 static const struct hw_perf_counter_ops perf_ops_context_switches = {
1659         .enable         = context_switches_perf_counter_enable,
1660         .disable        = context_switches_perf_counter_disable,
1661         .read           = context_switches_perf_counter_read,
1662 };
1663
1664 /*
1665  * Software counter: cpu migrations
1666  */
1667
1668 static inline u64 get_cpu_migrations(struct perf_counter *counter)
1669 {
1670         struct task_struct *curr = counter->ctx->task;
1671
1672         if (curr)
1673                 return curr->se.nr_migrations;
1674         return cpu_nr_migrations(smp_processor_id());
1675 }
1676
1677 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
1678 {
1679         u64 prev, now;
1680         s64 delta;
1681
1682         prev = atomic64_read(&counter->hw.prev_count);
1683         now = get_cpu_migrations(counter);
1684
1685         atomic64_set(&counter->hw.prev_count, now);
1686
1687         delta = now - prev;
1688
1689         atomic64_add(delta, &counter->count);
1690 }
1691
1692 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1693 {
1694         cpu_migrations_perf_counter_update(counter);
1695 }
1696
1697 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
1698 {
1699         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1700                 atomic64_set(&counter->hw.prev_count,
1701                              get_cpu_migrations(counter));
1702         return 0;
1703 }
1704
1705 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1706 {
1707         cpu_migrations_perf_counter_update(counter);
1708 }
1709
1710 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
1711         .enable         = cpu_migrations_perf_counter_enable,
1712         .disable        = cpu_migrations_perf_counter_disable,
1713         .read           = cpu_migrations_perf_counter_read,
1714 };
1715
1716 static const struct hw_perf_counter_ops *
1717 sw_perf_counter_init(struct perf_counter *counter)
1718 {
1719         struct perf_counter_hw_event *hw_event = &counter->hw_event;
1720         const struct hw_perf_counter_ops *hw_ops = NULL;
1721         struct hw_perf_counter *hwc = &counter->hw;
1722
1723         /*
1724          * Software counters (currently) can't in general distinguish
1725          * between user, kernel and hypervisor events.
1726          * However, context switches and cpu migrations are considered
1727          * to be kernel events, and page faults are never hypervisor
1728          * events.
1729          */
1730         switch (counter->hw_event.type) {
1731         case PERF_COUNT_CPU_CLOCK:
1732                 if (!(counter->hw_event.exclude_user ||
1733                       counter->hw_event.exclude_kernel ||
1734                       counter->hw_event.exclude_hv))
1735                         hw_ops = &perf_ops_cpu_clock;
1736                 break;
1737         case PERF_COUNT_TASK_CLOCK:
1738                 if (counter->hw_event.exclude_user ||
1739                     counter->hw_event.exclude_kernel ||
1740                     counter->hw_event.exclude_hv)
1741                         break;
1742                 /*
1743                  * If the user instantiates this as a per-cpu counter,
1744                  * use the cpu_clock counter instead.
1745                  */
1746                 if (counter->ctx->task)
1747                         hw_ops = &perf_ops_task_clock;
1748                 else
1749                         hw_ops = &perf_ops_cpu_clock;
1750                 break;
1751         case PERF_COUNT_PAGE_FAULTS:
1752         case PERF_COUNT_PAGE_FAULTS_MIN:
1753         case PERF_COUNT_PAGE_FAULTS_MAJ:
1754                 hw_ops = &perf_ops_generic;
1755                 break;
1756         case PERF_COUNT_CONTEXT_SWITCHES:
1757                 if (!counter->hw_event.exclude_kernel)
1758                         hw_ops = &perf_ops_context_switches;
1759                 break;
1760         case PERF_COUNT_CPU_MIGRATIONS:
1761                 if (!counter->hw_event.exclude_kernel)
1762                         hw_ops = &perf_ops_cpu_migrations;
1763                 break;
1764         default:
1765                 break;
1766         }
1767
1768         if (hw_ops)
1769                 hwc->irq_period = hw_event->irq_period;
1770
1771         return hw_ops;
1772 }
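
/*
 * Editor's sketch (illustrative, not part of the original file): the
 * exclude_* checks above mean that a request a software counter cannot
 * honour is simply refused by returning NULL ops, e.g.:
 *
 *      struct perf_counter_hw_event ev = {
 *              .type           = PERF_COUNT_CONTEXT_SWITCHES,
 *              .exclude_kernel = 1,    // but context switches are kernel events
 *      };
 *      // sw_perf_counter_init() returns NULL for this combination, so
 *      // perf_counter_alloc() below fails rather than silently miscounting.
 *
 * The page fault cases, by contrast, hand back perf_ops_generic without
 * looking at the exclude bits at all.
 */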
1773
1774 /*
1775  * Allocate and initialize a counter structure
1776  */
1777 static struct perf_counter *
1778 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
1779                    int cpu,
1780                    struct perf_counter_context *ctx,
1781                    struct perf_counter *group_leader,
1782                    gfp_t gfpflags)
1783 {
1784         const struct hw_perf_counter_ops *hw_ops;
1785         struct perf_counter *counter;
1786
1787         counter = kzalloc(sizeof(*counter), gfpflags);
1788         if (!counter)
1789                 return NULL;
1790
1791         /*
1792          * Single counters are their own group leaders, with an
1793          * empty sibling list:
1794          */
1795         if (!group_leader)
1796                 group_leader = counter;
1797
1798         mutex_init(&counter->mutex);
1799         INIT_LIST_HEAD(&counter->list_entry);
1800         INIT_LIST_HEAD(&counter->sibling_list);
1801         init_waitqueue_head(&counter->waitq);
1802
1803         INIT_LIST_HEAD(&counter->child_list);
1804
1805         counter->irqdata                = &counter->data[0];
1806         counter->usrdata                = &counter->data[1];
1807         counter->cpu                    = cpu;
1808         counter->hw_event               = *hw_event;
1809         counter->wakeup_pending         = 0;
1810         counter->group_leader           = group_leader;
1811         counter->hw_ops                 = NULL;
1812         counter->ctx                    = ctx;
1813
1814         counter->state = PERF_COUNTER_STATE_INACTIVE;
1815         if (hw_event->disabled)
1816                 counter->state = PERF_COUNTER_STATE_OFF;
1817
1818         hw_ops = NULL;
1819         if (!hw_event->raw && hw_event->type < 0)
1820                 hw_ops = sw_perf_counter_init(counter);
1821         else
1822                 hw_ops = hw_perf_counter_init(counter);
1823
1824         if (!hw_ops) {
1825                 kfree(counter);
1826                 return NULL;
1827         }
1828         counter->hw_ops = hw_ops;
1829
1830         return counter;
1831 }
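
/*
 * Editor's note (illustrative): hardware and software counters are told
 * apart purely by the sign of hw_event->type - the software event types
 * used above are negative values - unless ->raw is set, in which case the
 * value is handed to the architecture's hw_perf_counter_init() untouched.
 * A minimal sketch, field layout as in <linux/perf_counter.h>:
 *
 *      struct perf_counter_hw_event ev = {
 *              .type     = PERF_COUNT_TASK_CLOCK,      // negative => software
 *              .disabled = 1,                          // start in STATE_OFF
 *      };
 *      // perf_counter_alloc(&ev, -1, ctx, NULL, GFP_KERNEL) picks
 *      // perf_ops_task_clock for a task context, perf_ops_cpu_clock
 *      // for a per-cpu one.
 */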
1832
1833 /**
1834  * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
1835  *
1836  * @hw_event_uptr:      event type attributes for monitoring/sampling
1837  * @pid:                target pid
1838  * @cpu:                target cpu
1839  * @group_fd:           group leader counter fd
1840  */
1841 SYSCALL_DEFINE5(perf_counter_open,
1842                 const struct perf_counter_hw_event __user *, hw_event_uptr,
1843                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
1844 {
1845         struct perf_counter *counter, *group_leader;
1846         struct perf_counter_hw_event hw_event;
1847         struct perf_counter_context *ctx;
1848         struct file *counter_file = NULL;
1849         struct file *group_file = NULL;
1850         int fput_needed = 0;
1851         int fput_needed2 = 0;
1852         int ret;
1853
1854         /* for future expandability... */
1855         if (flags)
1856                 return -EINVAL;
1857
1858         if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
1859                 return -EFAULT;
1860
1861         /*
1862          * Get the target context (task or percpu):
1863          */
1864         ctx = find_get_context(pid, cpu);
1865         if (IS_ERR(ctx))
1866                 return PTR_ERR(ctx);
1867
1868         /*
1869          * Look up the group leader (we will attach this counter to it):
1870          */
1871         group_leader = NULL;
1872         if (group_fd != -1) {
1873                 ret = -EINVAL;
1874                 group_file = fget_light(group_fd, &fput_needed);
1875                 if (!group_file)
1876                         goto err_put_context;
1877                 if (group_file->f_op != &perf_fops)
1878                         goto err_put_context;
1879
1880                 group_leader = group_file->private_data;
1881                 /*
1882                  * Do not allow a recursive hierarchy (this new counter
1883                  * becoming a sibling of a counter that is itself a sibling):
1884                  */
1885                 if (group_leader->group_leader != group_leader)
1886                         goto err_put_context;
1887                 /*
1888                  * Do not allow attaching to a group in a different
1889                  * task or CPU context:
1890                  */
1891                 if (group_leader->ctx != ctx)
1892                         goto err_put_context;
1893                 /*
1894                  * Only a group leader can be exclusive or pinned
1895                  */
1896                 if (hw_event.exclusive || hw_event.pinned)
1897                         goto err_put_context;
1898         }
1899
1900         ret = -EINVAL;
1901         counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
1902                                      GFP_KERNEL);
1903         if (!counter)
1904                 goto err_put_context;
1905
1906         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
1907         if (ret < 0)
1908                 goto err_free_put_context;
1909
1910         counter_file = fget_light(ret, &fput_needed2);
1911         if (!counter_file)
1912                 goto err_free_put_context;
1913
1914         counter->filp = counter_file;
1915         mutex_lock(&ctx->mutex);
1916         perf_install_in_context(ctx, counter, cpu);
1917         mutex_unlock(&ctx->mutex);
1918
1919         fput_light(counter_file, fput_needed2);
1920
1921 out_fput:
1922         fput_light(group_file, fput_needed);
1923
1924         return ret;
1925
1926 err_free_put_context:
1927         kfree(counter);
1928
1929 err_put_context:
1930         put_context(ctx);
1931
1932         goto out_fput;
1933 }
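
/*
 * Editor's sketch of how user space is expected to drive this syscall
 * (illustrative only: the syscall number is arch-specific, the structure
 * layout comes from the exported perf_counter.h, and read() is assumed
 * here to return the raw u64 count):
 *
 *      struct perf_counter_hw_event ev = {
 *              .type    = PERF_COUNT_PAGE_FAULTS,
 *              .inherit = 1,                   // clone into children on fork
 *      };
 *      u64 count;
 *
 *      // assumed conventions: pid 0 = current task, cpu -1 = any cpu,
 *      // group_fd -1 = no group leader, flags 0
 *      int fd = syscall(__NR_perf_counter_open, &ev, 0, -1, -1, 0);
 *
 *      run_workload();
 *
 *      read(fd, &count, sizeof(count));        // current counter value
 *      close(fd);
 */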
1934
1935 /*
1936  * Initialize the perf_counter context in a task_struct:
1937  */
1938 static void
1939 __perf_counter_init_context(struct perf_counter_context *ctx,
1940                             struct task_struct *task)
1941 {
1942         memset(ctx, 0, sizeof(*ctx));
1943         spin_lock_init(&ctx->lock);
1944         mutex_init(&ctx->mutex);
1945         INIT_LIST_HEAD(&ctx->counter_list);
1946         ctx->task = task;
1947 }
1948
1949 /*
1950  * inherit a counter from parent task to child task:
1951  */
1952 static struct perf_counter *
1953 inherit_counter(struct perf_counter *parent_counter,
1954               struct task_struct *parent,
1955               struct perf_counter_context *parent_ctx,
1956               struct task_struct *child,
1957               struct perf_counter *group_leader,
1958               struct perf_counter_context *child_ctx)
1959 {
1960         struct perf_counter *child_counter;
1961
1962         /*
1963          * Instead of creating recursive hierarchies of counters,
1964          * we link inherited counters back to the original parent,
1965          * which is guaranteed to have a filp that we can use as the
1966          * reference count:
1967          */
1968         if (parent_counter->parent)
1969                 parent_counter = parent_counter->parent;
1970
1971         child_counter = perf_counter_alloc(&parent_counter->hw_event,
1972                                            parent_counter->cpu, child_ctx,
1973                                            group_leader, GFP_KERNEL);
1974         if (!child_counter)
1975                 return NULL;
1976
1977         /*
1978          * Link it up in the child's context:
1979          */
1980         child_counter->task = child;
1981         list_add_counter(child_counter, child_ctx);
1982         child_ctx->nr_counters++;
1983
1984         child_counter->parent = parent_counter;
1985         /*
1986          * inherit into child's child as well:
1987          */
1988         child_counter->hw_event.inherit = 1;
1989
1990         /*
1991          * Get a reference to the parent filp - we will fput it
1992          * when the child counter exits. This is safe to do because
1993          * we are in the parent and we know that the filp still
1994          * exists and has a nonzero count:
1995          */
1996         atomic_long_inc(&parent_counter->filp->f_count);
1997
1998         /*
1999          * Link this into the parent counter's child list
2000          */
2001         mutex_lock(&parent_counter->mutex);
2002         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
2003
2004         /*
2005          * Make the child state follow the state of the parent counter,
2006          * not its hw_event.disabled bit.  We hold the parent's mutex,
2007          * so we won't race with perf_counter_{en,dis}able_family.
2008          */
2009         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
2010                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
2011         else
2012                 child_counter->state = PERF_COUNTER_STATE_OFF;
2013
2014         mutex_unlock(&parent_counter->mutex);
2015
2016         return child_counter;
2017 }
2018
2019 static int inherit_group(struct perf_counter *parent_counter,
2020               struct task_struct *parent,
2021               struct perf_counter_context *parent_ctx,
2022               struct task_struct *child,
2023               struct perf_counter_context *child_ctx)
2024 {
2025         struct perf_counter *leader;
2026         struct perf_counter *sub;
2027
2028         leader = inherit_counter(parent_counter, parent, parent_ctx,
2029                                  child, NULL, child_ctx);
2030         if (!leader)
2031                 return -ENOMEM;
2032         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
2033                 if (!inherit_counter(sub, parent, parent_ctx,
2034                                      child, leader, child_ctx))
2035                         return -ENOMEM;
2036         }
2037         return 0;
2038 }
2039
2040 static void sync_child_counter(struct perf_counter *child_counter,
2041                                struct perf_counter *parent_counter)
2042 {
2043         u64 parent_val, child_val;
2044
2045         parent_val = atomic64_read(&parent_counter->count);
2046         child_val = atomic64_read(&child_counter->count);
2047
2048         /*
2049          * Add back the child's count to the parent's count:
2050          */
2051         atomic64_add(child_val, &parent_counter->count);
2052
2053         /*
2054          * Remove this counter from the parent's list
2055          */
2056         mutex_lock(&parent_counter->mutex);
2057         list_del_init(&child_counter->child_list);
2058         mutex_unlock(&parent_counter->mutex);
2059
2060         /*
2061          * Release the parent counter, if this was the last
2062          * reference to it.
2063          */
2064         fput(parent_counter->filp);
2065 }
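
/*
 * Editor's note: the fold-back above is plain addition - if the parent
 * counter reads 1000 and the exiting child's inherited copy holds 250,
 * the parent ends up reporting 1250, so the child's contribution survives
 * the child. (Figures made up for illustration.)
 */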
2066
2067 static void
2068 __perf_counter_exit_task(struct task_struct *child,
2069                          struct perf_counter *child_counter,
2070                          struct perf_counter_context *child_ctx)
2071 {
2072         struct perf_counter *parent_counter;
2073         struct perf_counter *sub, *tmp;
2074
2075         /*
2076          * If we do not self-reap then we have to wait for the
2077          * child task to unschedule (which is guaranteed to happen),
2078          * so that its counter has reached its final count. (This
2079          * condition triggers rarely - child tasks usually get
2080          * off their CPU before the parent has a chance to
2081          * get this far into the reaping action.)
2082          */
2083         if (child != current) {
2084                 wait_task_inactive(child, 0);
2085                 list_del_init(&child_counter->list_entry);
2086         } else {
2087                 struct perf_cpu_context *cpuctx;
2088                 unsigned long flags;
2089                 u64 perf_flags;
2090
2091                 /*
2092                  * Disable and unlink this counter.
2093                  *
2094                  * Be careful about zapping the list - IRQ/NMI context
2095                  * could still be processing it:
2096                  */
2097                 curr_rq_lock_irq_save(&flags);
2098                 perf_flags = hw_perf_save_disable();
2099
2100                 cpuctx = &__get_cpu_var(perf_cpu_context);
2101
2102                 group_sched_out(child_counter, cpuctx, child_ctx);
2103
2104                 list_del_init(&child_counter->list_entry);
2105
2106                 child_ctx->nr_counters--;
2107
2108                 hw_perf_restore(perf_flags);
2109                 curr_rq_unlock_irq_restore(&flags);
2110         }
2111
2112         parent_counter = child_counter->parent;
2113         /*
2114          * It can happen that the parent exits first, and has counters
2115          * that are still around due to the child reference. These
2116          * counters need to be zapped here - otherwise they would linger.
2117          */
2118         if (parent_counter) {
2119                 sync_child_counter(child_counter, parent_counter);
2120                 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
2121                                          list_entry) {
2122                         if (sub->parent) {
2123                                 sync_child_counter(sub, sub->parent);
2124                                 kfree(sub);
2125                         }
2126                 }
2127                 kfree(child_counter);
2128         }
2129 }
2130
2131 /*
2132  * When a child task exits, feed back counter values to parent counters.
2133  *
2134  * Note: we may be running in child context, but the PID is not hashed
2135  * anymore, so new counters will not be added.
2136  */
2137 void perf_counter_exit_task(struct task_struct *child)
2138 {
2139         struct perf_counter *child_counter, *tmp;
2140         struct perf_counter_context *child_ctx;
2141
2142         child_ctx = &child->perf_counter_ctx;
2143
2144         if (likely(!child_ctx->nr_counters))
2145                 return;
2146
2147         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
2148                                  list_entry)
2149                 __perf_counter_exit_task(child, child_counter, child_ctx);
2150 }
2151
2152 /*
2153  * Initialize the perf_counter context in task_struct
2154  */
2155 void perf_counter_init_task(struct task_struct *child)
2156 {
2157         struct perf_counter_context *child_ctx, *parent_ctx;
2158         struct perf_counter *counter;
2159         struct task_struct *parent = current;
2160
2161         child_ctx  =  &child->perf_counter_ctx;
2162         parent_ctx = &parent->perf_counter_ctx;
2163
2164         __perf_counter_init_context(child_ctx, child);
2165
2166         /*
2167          * This is executed from the parent task context, so inherit
2168          * counters that have been marked for cloning:
2169          */
2170
2171         if (likely(!parent_ctx->nr_counters))
2172                 return;
2173
2174         /*
2175          * Lock the parent list. No need to lock the child - it is not
2176          * PID-hashed yet and not running, so nobody can access it.
2177          */
2178         mutex_lock(&parent_ctx->mutex);
2179
2180         /*
2181          * We don't have to disable NMIs - we are only looking at
2182          * the list, not manipulating it:
2183          */
2184         list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
2185                 if (!counter->hw_event.inherit)
2186                         continue;
2187
2188                 if (inherit_group(counter, parent,
2189                                   parent_ctx, child, child_ctx))
2190                         break;
2191         }
2192
2193         mutex_unlock(&parent_ctx->mutex);
2194 }
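
/*
 * Editor's sketch (illustrative): together with perf_counter_exit_task()
 * above, this gives inherited counters fork()/exit() symmetry. Reusing the
 * assumed userspace conventions from the sys_perf_counter_open() sketch:
 *
 *      int fd = syscall(__NR_perf_counter_open, &ev, 0, -1, -1, 0);
 *                                      // ev.inherit == 1
 *      if (fork() == 0) {
 *              do_child_work();        // the child received its own copy via
 *              _exit(0);               // perf_counter_init_task(); at exit,
 *      }                               // sync_child_counter() folds its count
 *      wait(NULL);                     // back into the parent's counter
 *      read(fd, &count, sizeof(count));        // parent + child events combined
 */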
2195
2196 static void __cpuinit perf_counter_init_cpu(int cpu)
2197 {
2198         struct perf_cpu_context *cpuctx;
2199
2200         cpuctx = &per_cpu(perf_cpu_context, cpu);
2201         __perf_counter_init_context(&cpuctx->ctx, NULL);
2202
2203         mutex_lock(&perf_resource_mutex);
2204         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
2205         mutex_unlock(&perf_resource_mutex);
2206
2207         hw_perf_counter_setup(cpu);
2208 }
2209
2210 #ifdef CONFIG_HOTPLUG_CPU
2211 static void __perf_counter_exit_cpu(void *info)
2212 {
2213         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
2214         struct perf_counter_context *ctx = &cpuctx->ctx;
2215         struct perf_counter *counter, *tmp;
2216
2217         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
2218                 __perf_counter_remove_from_context(counter);
2219 }
2220 static void perf_counter_exit_cpu(int cpu)
2221 {
2222         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
2223         struct perf_counter_context *ctx = &cpuctx->ctx;
2224
2225         mutex_lock(&ctx->mutex);
2226         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
2227         mutex_unlock(&ctx->mutex);
2228 }
2229 #else
2230 static inline void perf_counter_exit_cpu(int cpu) { }
2231 #endif
2232
2233 static int __cpuinit
2234 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
2235 {
2236         unsigned int cpu = (long)hcpu;
2237
2238         switch (action) {
2239
2240         case CPU_UP_PREPARE:
2241         case CPU_UP_PREPARE_FROZEN:
2242                 perf_counter_init_cpu(cpu);
2243                 break;
2244
2245         case CPU_DOWN_PREPARE:
2246         case CPU_DOWN_PREPARE_FROZEN:
2247                 perf_counter_exit_cpu(cpu);
2248                 break;
2249
2250         default:
2251                 break;
2252         }
2253
2254         return NOTIFY_OK;
2255 }
2256
2257 static struct notifier_block __cpuinitdata perf_cpu_nb = {
2258         .notifier_call          = perf_cpu_notify,
2259 };
2260
2261 static int __init perf_counter_init(void)
2262 {
2263         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
2264                         (void *)(long)smp_processor_id());
2265         register_cpu_notifier(&perf_cpu_nb);
2266
2267         return 0;
2268 }
2269 early_initcall(perf_counter_init);
2270
2271 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
2272 {
2273         return sprintf(buf, "%d\n", perf_reserved_percpu);
2274 }
2275
2276 static ssize_t
2277 perf_set_reserve_percpu(struct sysdev_class *class,
2278                         const char *buf,
2279                         size_t count)
2280 {
2281         struct perf_cpu_context *cpuctx;
2282         unsigned long val;
2283         int err, cpu, mpt;
2284
2285         err = strict_strtoul(buf, 10, &val);
2286         if (err)
2287                 return err;
2288         if (val > perf_max_counters)
2289                 return -EINVAL;
2290
2291         mutex_lock(&perf_resource_mutex);
2292         perf_reserved_percpu = val;
2293         for_each_online_cpu(cpu) {
2294                 cpuctx = &per_cpu(perf_cpu_context, cpu);
2295                 spin_lock_irq(&cpuctx->ctx.lock);
2296                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
2297                           perf_max_counters - perf_reserved_percpu);
2298                 cpuctx->max_pertask = mpt;
2299                 spin_unlock_irq(&cpuctx->ctx.lock);
2300         }
2301         mutex_unlock(&perf_resource_mutex);
2302
2303         return count;
2304 }
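
/*
 * Editor's note (illustrative): with perf_max_counters = 8, a cpu that
 * already has 3 counters in its context and a new reservation of 2, the
 * loop above computes mpt = min(8 - 3, 8 - 2) = 5, i.e. at most 5 more
 * per-task counters may be scheduled on that cpu. The attribute itself is
 * expected to show up under the cpu sysdev class as
 * .../cpu/perf_counters/reserve_percpu (exact path depends on the sysfs
 * layout of the running kernel).
 */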
2305
2306 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
2307 {
2308         return sprintf(buf, "%d\n", perf_overcommit);
2309 }
2310
2311 static ssize_t
2312 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
2313 {
2314         unsigned long val;
2315         int err;
2316
2317         err = strict_strtoul(buf, 10, &val);
2318         if (err)
2319                 return err;
2320         if (val > 1)
2321                 return -EINVAL;
2322
2323         mutex_lock(&perf_resource_mutex);
2324         perf_overcommit = val;
2325         mutex_unlock(&perf_resource_mutex);
2326
2327         return count;
2328 }
2329
2330 static SYSDEV_CLASS_ATTR(
2331                                 reserve_percpu,
2332                                 0644,
2333                                 perf_show_reserve_percpu,
2334                                 perf_set_reserve_percpu
2335                         );
2336
2337 static SYSDEV_CLASS_ATTR(
2338                                 overcommit,
2339                                 0644,
2340                                 perf_show_overcommit,
2341                                 perf_set_overcommit
2342                         );
2343
2344 static struct attribute *perfclass_attrs[] = {
2345         &attr_reserve_percpu.attr,
2346         &attr_overcommit.attr,
2347         NULL
2348 };
2349
2350 static struct attribute_group perfclass_attr_group = {
2351         .attrs                  = perfclass_attrs,
2352         .name                   = "perf_counters",
2353 };
2354
2355 static int __init perf_counter_sysfs_init(void)
2356 {
2357         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
2358                                   &perfclass_attr_group);
2359 }
2360 device_initcall(perf_counter_sysfs_init);