/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE                      = 0,
        PERF_TYPE_SOFTWARE                      = 1,
        PERF_TYPE_TRACEPOINT                    = 2,
        PERF_TYPE_HW_CACHE                      = 3,
        PERF_TYPE_RAW                           = 4,
        PERF_TYPE_BREAKPOINT                    = 5,

        PERF_TYPE_MAX,                          /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow profiling of
 * them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,

        PERF_SAMPLE_MAX = 1U << 11,             /* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
        PERF_FORMAT_ID                          = 1U << 2,
        PERF_FORMAT_GROUP                       = 1U << 3,

        PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
};
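
/*
 * Example (illustrative only, not part of the ABI): a user-space decode
 * of read() on a non-group event opened with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING,
 * matching the struct read_format layout above:
 *
 *      struct {
 *              __u64 value;
 *              __u64 time_enabled;
 *              __u64 time_running;
 *      } rf;
 *
 *      if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *              estimate = rf.value * rf.time_enabled / rf.time_running;
 *
 * The enabled/running times allow scaling the count when the event was
 * multiplexed with others on the PMU.
 */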

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */
                                /*
                                 * precise_ip:
                                 *
                                 *  0 - SAMPLE_IP can have arbitrary skid
                                 *  1 - SAMPLE_IP must have constant skid
                                 *  2 - SAMPLE_IP requested to have 0 skid
                                 *  3 - SAMPLE_IP must have 0 skid
                                 *
                                 *  See also PERF_RECORD_MISC_EXACT_IP
                                 */
                                precise_ip     :  2, /* skid constraint       */

                                __reserved_1   : 47;

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };

        __u32                   bp_type;
        __u64                   bp_addr;
        __u64                   bp_len;
};
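
/*
 * Example (user-space sketch, illustrative only): opening a CPU-cycle
 * counter on the calling task. There is no glibc wrapper for this
 * syscall, so syscall(2) with __NR_perf_event_open is assumed:
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/perf_event.h>
 *
 *      struct perf_event_attr attr;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type     = PERF_TYPE_HARDWARE;
 *      attr.size     = sizeof(attr);
 *      attr.config   = PERF_COUNT_HW_CPU_CYCLES;
 *      attr.disabled = 1;
 *
 *      // pid 0 (self), cpu -1 (any), no group fd, no flags
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * attr.size lets the kernel handle older/newer userspace (fwd/bwd
 * compat); a zeroed struct keeps all optional features off.
 */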

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
#define PERF_EVENT_IOC_RESET            _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER       _IOW('$', 6, char *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
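
/*
 * Example (illustrative): driving an event created with attr.disabled = 1
 * through the ioctls above, then reading the raw count (layout per
 * attr.read_format, assumed 0 here):
 *
 *      __u64 count;
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      // ... workload under measurement ...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 *
 * Passing PERF_IOC_FLAG_GROUP as the ioctl argument applies the
 * operation to the whole event group rather than a single event.
 */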

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq;
         *   s64 count;
         *
         *   do {
         *     seq = pc->lock;
         *
         *     barrier();
         *     if (pc->index) {
         *       count = pmc_read(pc->index - 1);
         *       count += pc->offset;
         *     } else
         *       goto regular_read;
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */

                /*
                 * Hole for extension of the self monitor capabilities
                 */

        __u64   __reserved[123];        /* align to 1k */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an rmb(), on
         * SMP capable platforms, after reading this value -- see
         * perf_event_wakeup().
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data. In this case
         * the kernel will not over-write unread data.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
};
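
/*
 * Example (user-space consumer sketch, illustrative only): with the fd
 * mmap()ed at 'base' (one control page followed by 2^n data pages),
 * consume the ring buffer and publish the new tail. process(), rmb(),
 * page_size and data_size are assumed to be supplied by the application:
 *
 *      struct perf_event_mmap_page *pc = base;
 *      char *data = (char *)base + page_size;
 *      __u64 tail = pc->data_tail;
 *      __u64 head = pc->data_head;
 *
 *      rmb();                          // pairs with the kernel's barrier
 *      while (tail < head) {
 *              struct perf_event_header *hdr;
 *
 *              hdr   = (void *)(data + (tail & (data_size - 1)));
 *              process(hdr);
 *              tail += hdr->size;      // ignoring wrap-around for brevity
 *      }
 *      pc->data_tail = tail;           // kernel may now reuse the space
 *
 * As noted above, the data_tail protocol only applies to PROT_WRITE
 * mappings; with a read-only mapping the kernel overwrites old data.
 */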

#define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER             (5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

enum perf_event_type {

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
        PERF_RECORD_UNTHROTTLE                  = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt the
         *      # stability of its content, it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size];}&& PERF_SAMPLE_RAW
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,

        PERF_RECORD_MAX,                        /* non-ABI */
};
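
/*
 * Example (illustrative): dispatching on record type while walking the
 * mmap data area. header.size covers the whole record, so unknown types
 * can be skipped (wrap-around handling omitted for brevity):
 *
 *      struct perf_event_header *hdr = ptr;
 *
 *      switch (hdr->type) {
 *      case PERF_RECORD_SAMPLE:
 *              // body layout is dictated by attr.sample_type, see above
 *              break;
 *      case PERF_RECORD_MMAP:
 *      case PERF_RECORD_COMM:
 *              // track address space / thread-name changes
 *              break;
 *      default:
 *              break;
 *      }
 *      ptr = (char *)hdr + hdr->size;
 */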

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};
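
/*
 * Example (illustrative): these values are not instruction pointers but
 * markers interleaved into the PERF_SAMPLE_CALLCHAIN ips[] array; each
 * marker flags the context of the addresses that follow it. Since the
 * markers sit at the very top of the u64 range, a comparison against
 * PERF_CONTEXT_MAX separates them from real IPs:
 *
 *      for (i = 0; i < nr; i++) {
 *              if (ips[i] >= PERF_CONTEXT_MAX)
 *                      context = ips[i];       // switch context
 *              else
 *                      resolve(context, ips[i]);
 *      }
 *
 * resolve() is a placeholder for the consumer's symbolization step.
 */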

#define PERF_FLAG_FD_NO_GROUP   (1U << 0)
#define PERF_FLAG_FD_OUTPUT     (1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
#endif

struct perf_guest_info_callbacks {
        int (*is_in_guest) (void);
        int (*is_user_mode) (void);
        unsigned long (*get_guest_ip) (void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <asm/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH            255

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

struct perf_branch_entry {
        __u64                           from;
        __u64                           to;
        __u64                           flags;
};

struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             idx;
                        int             last_cpu;
                };
                struct { /* software */
                        s64             remaining;
                        struct hrtimer  hrtimer;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                /* breakpoint */
                struct arch_hw_breakpoint       info;
#endif
        };
        atomic64_t                      prev_count;
        u64                             sample_period;
        u64                             last_period;
        atomic64_t                      period_left;
        u64                             interrupts;

        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

struct perf_event;

#define PERF_EVENT_TXN_STARTED 1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        int (*enable)                   (struct perf_event *event);
        void (*disable)                 (struct perf_event *event);
        int (*start)                    (struct perf_event *event);
        void (*stop)                    (struct perf_event *event);
        void (*read)                    (struct perf_event *event);
        void (*unthrottle)              (struct perf_event *event);

        /*
         * Group event scheduling is treated as a transaction: add the
         * group's events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         */

        void (*start_txn)       (const struct pmu *pmu);
        void (*cancel_txn)      (const struct pmu *pmu);
        int  (*commit_txn)      (const struct pmu *pmu);
};
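
/*
 * Example (kernel-internal sketch, not the exact scheduler code): how a
 * caller might use the transaction ops to schedule a group atomically;
 * the event iteration details are placeholders:
 *
 *      pmu->start_txn(pmu);
 *
 *      list_for_each_entry(event, &leader->sibling_list, group_entry) {
 *              if (pmu->enable(event))
 *                      goto fail;
 *      }
 *      if (!pmu->commit_txn(pmu))
 *              return 0;               // whole group is now scheduled
 * fail:
 *      pmu->cancel_txn(pmu);           // roll back the partial group
 */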

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;

struct perf_mmap_data {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             writable;       /* are we writable   */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};

struct perf_pending_entry {
        struct perf_pending_entry *next;
        void (*func)(struct perf_pending_entry *);
};

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS      8
#define SWEVENT_HLIST_SIZE      (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head       heads[SWEVENT_HLIST_SIZE];
        struct rcu_head         rcu_head;
};

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        const struct pmu                *pmu;

        enum perf_event_active_state    state;
        atomic64_t                      count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        struct perf_event_attr          attr;
        struct hw_perf_event            hw;

        struct perf_event_context       *ctx;
        struct file                     *filp;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct perf_mmap_data           *data;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct perf_pending_entry       pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        struct rcu_head                 rcu_head;
};

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             max_pertask;
        int                             exclusive;
        struct swevent_hlist            *swevent_hlist;
        struct mutex                    hlist_mutex;
        int                             hlist_refcount;

        /*
         * Recursion avoidance:
         *
         * task, softirq, irq, nmi context
         */
        int                             recursion[4];
};

struct perf_output_handle {
        struct perf_event               *event;
        struct perf_mmap_data           *data;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
        int                             page;
        int                             nmi;
        int                             sample;
};

#ifdef CONFIG_PERF_EVENTS

/*
 * Set by architecture code:
 */
extern int perf_max_events;

extern const struct pmu *hw_perf_event_init(struct perf_event *event);

extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern void perf_event_task_tick(struct task_struct *task);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void set_perf_event_pending(void);
extern void perf_event_do_pending(void);
extern void perf_event_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
                                pid_t pid,
                                perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);

struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
};

static inline
void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
        data->addr = addr;
        data->raw  = NULL;
}
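
/*
 * Example (kernel-internal sketch): a PMU interrupt handler taking a
 * sample on counter overflow. The init call matters because a stale
 * data->raw pointer must not leak into sample parsing:
 *
 *      struct perf_sample_data data;
 *
 *      perf_sample_data_init(&data, 0);
 *      data.period = event->hw.last_period;
 *
 *      if (perf_event_overflow(event, 1, &data, regs))
 *              ...                     // event limit reached: stop it
 *
 * The second argument (nmi == 1) tells the core the call comes from NMI
 * context, so wakeups must be deferred.
 */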

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        switch (event->attr.type) {
        case PERF_TYPE_SOFTWARE:
        case PERF_TYPE_TRACEPOINT:
        /* for now the breakpoint stuff also works as software event */
        case PERF_TYPE_BREAKPOINT:
                return 1;
        }
        return 0;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

extern void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
{
        unsigned long ip;

        memset(regs, 0, sizeof(*regs));

        switch (skip) {
        case 1:
                ip = CALLER_ADDR0;
                break;
        case 2:
                ip = CALLER_ADDR1;
                break;
        case 3:
                ip = CALLER_ADDR2;
                break;
        case 4:
                ip = CALLER_ADDR3;
                break;
        /* No need to support further for now */
        default:
                ip = 0;
        }

        perf_arch_fetch_caller_regs(regs, ip, skip);
}

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
        if (atomic_read(&perf_swevent_enabled[event_id])) {
                struct pt_regs hot_regs;

                if (!regs) {
                        perf_fetch_caller_regs(&hot_regs, 1);
                        regs = &hot_regs;
                }
                __perf_sw_event(event_id, nr, nmi, regs, addr);
        }
}
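
/*
 * Example (illustrative): core kernel callers emit software events with
 * a single call; e.g. accounting one context switch looks roughly like:
 *
 *      perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 *
 * With regs == NULL the inline above snapshots the caller's registers
 * via perf_fetch_caller_regs(), and the atomic_read() makes the whole
 * thing a cheap no-op while no such event is enabled.
 */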

extern void __perf_event_mmap(struct vm_area_struct *vma);

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                __perf_event_mmap(vma);
}

extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)   (user_mode(regs) ? PERF_RECORD_MISC_USER : \
                                 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)  instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size,
                             int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
                            struct task_struct *next)                   { }
static inline void
perf_event_task_tick(struct task_struct *task)                          { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_do_pending(void)                          { }
static inline void perf_event_print_debug(void)                         { }
static inline void perf_disable(void)                                   { }
static inline void perf_enable(void)                                    { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
                     struct pt_regs *regs, u64 addr)                    { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_comm(struct task_struct *tsk)             { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
#endif

#define perf_output_put(handle, x) \
        perf_output_copy((handle), &(x), sizeof(x))
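
/*
 * Example (kernel-internal sketch): emitting a record with the output
 * API above; the field values are placeholders:
 *
 *      struct perf_output_handle handle;
 *      struct perf_event_header header = {
 *              .type = PERF_RECORD_SAMPLE,
 *              .size = sizeof(header) + sizeof(u64),
 *      };
 *
 *      if (perf_output_begin(&handle, event, header.size, nmi, 1))
 *              return;                 // no room; accounted in 'lost'
 *      perf_output_put(&handle, header);
 *      perf_output_put(&handle, ip);   // a u64 payload
 *      perf_output_end(&handle);
 */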

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)                                   \
do {                                                            \
        static struct notifier_block fn##_nb __cpuinitdata =    \
                { .notifier_call = fn, .priority = 20 };        \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,             \
                (void *)(unsigned long)smp_processor_id());     \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,               \
                (void *)(unsigned long)smp_processor_id());     \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                 \
                (void *)(unsigned long)smp_processor_id());     \
        register_cpu_notifier(&fn##_nb);                        \
} while (0)

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */