/*
 * include/linux/perf_counter.h
 * (retrieved from the mv-sheeva.git gitweb mirror at git.karo-electronics.de)
 */
1 /*
2  *  Performance counters:
3  *
4  *   Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
5  *   Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
6  *
7  *  Data type definitions, declarations, prototypes.
8  *
9  *  Started by: Thomas Gleixner and Ingo Molnar
10  *
 *  For licensing details see kernel-base/COPYING
12  */
13 #ifndef _LINUX_PERF_COUNTER_H
14 #define _LINUX_PERF_COUNTER_H
15
16 #include <linux/types.h>
17 #include <linux/ioctl.h>
18
19 /*
20  * User-space ABI bits:
21  */
22
23 /*
24  * Generalized performance counter event types, used by the hw_event.type
25  * parameter of the sys_perf_counter_open() syscall:
26  */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		=  0,
	PERF_COUNT_INSTRUCTIONS		=  1,
	PERF_COUNT_CACHE_REFERENCES	=  2,
	PERF_COUNT_CACHE_MISSES		=  3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	=  4,
	PERF_COUNT_BRANCH_MISSES	=  5,
	PERF_COUNT_BUS_CYCLES		=  6,

	/* Number of generalized hardware event types above: */
	PERF_HW_EVENTS_MAX		=  7,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and sw events of the
	 * kernel (and allow the profiling of them as well):
	 *
	 * Software counters use negative type values; this is what
	 * is_software_counter() tests for.  These values are ABI and
	 * must not be renumbered.
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	PERF_COUNT_PAGE_FAULTS		= -3,
	PERF_COUNT_CONTEXT_SWITCHES	= -4,
	PERF_COUNT_CPU_MIGRATIONS	= -5,

	/* One below the lowest (most negative) software event above: */
	PERF_SW_EVENTS_MIN		= -6,
};
55
56 /*
57  * IRQ-notification data record type:
58  */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE		=  0,	/* counter value only, no extra payload */
	PERF_RECORD_IRQ			=  1,	/* per-IRQ sample record -- NOTE(review): payload format defined by the implementation, confirm */
	PERF_RECORD_GROUP		=  2,	/* record covers the counter's whole sibling group -- TODO confirm */
};
64
65 /*
66  * Hardware event to monitor via a performance monitoring counter:
67  */
struct perf_counter_hw_event {
	/*
	 * Event type: an enum hw_event_types value (negative values
	 * select software counters), or a raw hardware event code when
	 * the 'raw' bit below is set.  Signed so the negative software
	 * IDs are representable.
	 */
	__s64			type;

	__u64			irq_period;	/* sampling period, in events */
	__u64			record_type;	/* enum perf_counter_record_type */
	__u64			read_format;

	/*
	 * Flag bits.  This is userspace ABI: the flag bits plus
	 * __reserved_1 must total exactly 64 so the bitfield occupies
	 * a single __u64.
	 *
	 * BUGFIX: __reserved_1 was 55, making 10 + 55 = 65 bits, which
	 * overflows the 64-bit allocation unit, silently allocates a
	 * second __u64 and grows the structure (and shifts every field
	 * after it) by 8 bytes.  10 flag bits + 54 reserved = 64.
	 */
	__u64			disabled       :  1, /* off by default        */
				nmi	       :  1, /* NMI sampling          */
				raw	       :  1, /* raw event type        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */

				__reserved_1   : 54;

	__u32			extra_config_len;
	__u32			__reserved_4;

	__u64			__reserved_2;
	__u64			__reserved_3;
};
94
/*
 * Ioctls that can be done on a perf counter fd:
 * ('$' is the ioctl magic number for perf counters; 0/1 are the
 * command ordinals -- these are userspace ABI.)
 */
#define PERF_COUNTER_IOC_ENABLE		_IO('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO('$', 1)
100
101 #ifdef __KERNEL__
102 /*
103  * Kernel-internal data types and definitions:
104  */
105
106 #ifdef CONFIG_PERF_COUNTERS
107 # include <asm/perf_counter.h>
108 #endif
109
110 #include <linux/list.h>
111 #include <linux/mutex.h>
112 #include <linux/rculist.h>
113 #include <linux/rcupdate.h>
114 #include <linux/spinlock.h>
115 #include <asm/atomic.h>
116
117 struct task_struct;
118
119 /**
120  * struct hw_perf_counter - performance counter hardware details:
121  */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	u64				config;		/* arch-specific event configuration value */
	unsigned long			config_base;	/* base of the config registers/MSRs -- TODO confirm per-arch meaning */
	unsigned long			counter_base;	/* base of the counter registers/MSRs -- TODO confirm per-arch meaning */
	int				nmi;		/* use NMI-based sampling */
	unsigned int			idx;		/* hardware counter index on the PMU */
	atomic64_t			prev_count;	/* last raw value read from the hardware */
	u64				irq_period;	/* sampling period, in events */
	atomic64_t			period_left;	/* events remaining until the next sample IRQ -- TODO confirm */
#endif
};
134
/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN		2048

/**
 * struct perf_data - performance counter IRQ data sampling buffer
 * @len:	number of valid bytes in @data
 * @rd_idx:	read cursor into @data -- presumably the consumer offset; verify against readers
 * @overrun:	overrun indicator/count when @data filled up -- TODO confirm
 * @data:	raw sampled record bytes
 */
struct perf_data {
	int				len;
	int				rd_idx;
	int				overrun;
	u8				data[PERF_DATA_BUFLEN];
};
149
150 struct perf_counter;
151
152 /**
153  * struct hw_perf_counter_ops - performance counter hw ops
154  */
/* Architecture hooks invoked per counter; @counter carries the hw state. */
struct hw_perf_counter_ops {
	int (*enable)			(struct perf_counter *counter);	/* start counting; NOTE(review): return convention not visible here, confirm */
	void (*disable)			(struct perf_counter *counter);	/* stop counting */
	void (*read)			(struct perf_counter *counter);	/* refresh counter->count from the hardware */
};
160
161 /**
162  * enum perf_counter_active_state - the states of a counter
163  */
/* Names suggest: <0 means not countable, 0 enabled-but-off-PMU, 1 counting. */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,	/* counter is in an error state */
	PERF_COUNTER_STATE_OFF		= -1,	/* disabled */
	PERF_COUNTER_STATE_INACTIVE	=  0,	/* enabled but not currently on the PMU */
	PERF_COUNTER_STATE_ACTIVE	=  1,	/* currently counting on the PMU */
};
170
171 struct file;
172
173 /**
174  * struct perf_counter - performance counter kernel representation:
175  */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;	/* entry on the context's counter list -- presumably ctx->counter_list; verify */
	struct list_head		sibling_list;	/* other counters in this counter's group */
	struct perf_counter		*group_leader;	/* leader of the group this counter belongs to */
	const struct hw_perf_counter_ops *hw_ops;	/* enable/disable/read implementation */

	enum perf_counter_active_state	state;
	enum perf_counter_active_state	prev_state;	/* state before the last transition -- TODO confirm usage */
	atomic64_t			count;		/* accumulated event count */

	struct perf_counter_hw_event	hw_event;	/* the userspace-supplied configuration */
	struct hw_perf_counter		hw;		/* architecture hw state for this counter */

	struct perf_counter_context	*ctx;		/* owning context */
	struct task_struct		*task;		/* monitored task -- NOTE(review): NULL for cpu counters? confirm */
	struct file			*filp;		/* the fd's struct file backing this counter */

	struct perf_counter		*parent;	/* counter this one was inherited from */
	struct list_head		child_list;	/* inherited children of this counter */

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			mutex;

	int				oncpu;		/* cpu this counter is currently scheduled on */
	int				cpu;		/* requested cpu binding -- presumably -1 means any; verify */

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;	/* buffer currently written from IRQ context */
	struct perf_data		*usrdata;	/* buffer currently read by userspace */
	struct perf_data		data[2];	/* the two buffers irqdata/usrdata point into */
#endif
};
214
215 /**
216  * struct perf_counter_context - counter context structure
217  *
218  * Used as a container for task counters and CPU counters as well:
219  */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;	/* all counters in this context */
	int			nr_counters;	/* number of counters on counter_list */
	int			nr_active;	/* counters currently in the ACTIVE state */
	int			is_active;	/* context is currently scheduled on a cpu -- TODO confirm */
	struct task_struct	*task;		/* owning task -- presumably NULL for per-cpu contexts; verify */
#endif
};
241
242 /**
243  * struct perf_counter_cpu_context - per cpu counter context structure
244  */
struct perf_cpu_context {
	struct perf_counter_context	ctx;		/* this cpu's own (per-cpu) counter context */
	struct perf_counter_context	*task_ctx;	/* context of the task running on this cpu -- TODO confirm */
	int				active_oncpu;	/* counters currently active on this cpu -- TODO confirm */
	int				max_pertask;	/* limit of per-task counters on this cpu -- TODO confirm */
	int				exclusive;	/* an exclusive counter group owns the PMU */
};
252
253 /*
254  * Set by architecture code:
255  */
256 extern int perf_max_counters;
257
258 #ifdef CONFIG_PERF_COUNTERS
/* Bind a counter to its architecture implementation: */
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

/* Scheduler/task lifecycle hooks: */
extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern void perf_counter_unthrottle(void);
/* Globally disable counting; hw_perf_restore() takes the returned cookie: */
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
/* Disable/enable all counters of the current task: */
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
/* Schedule a whole counter group onto the PMU atomically: */
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu);
277
278 /*
279  * Return 1 for a software counter, 0 for a hardware counter
280  */
281 static inline int is_software_counter(struct perf_counter *counter)
282 {
283         return !counter->hw_event.raw && counter->hw_event.type < 0;
284 }
285
286 #else
287 static inline void
288 perf_counter_task_sched_in(struct task_struct *task, int cpu)           { }
289 static inline void
290 perf_counter_task_sched_out(struct task_struct *task, int cpu)          { }
291 static inline void
292 perf_counter_task_tick(struct task_struct *task, int cpu)               { }
293 static inline void perf_counter_init_task(struct task_struct *child)    { }
294 static inline void perf_counter_exit_task(struct task_struct *child)    { }
295 static inline void perf_counter_notify(struct pt_regs *regs)            { }
296 static inline void perf_counter_print_debug(void)                       { }
297 static inline void perf_counter_unthrottle(void)                        { }
298 static inline void hw_perf_restore(u64 ctrl)                    { }
299 static inline u64 hw_perf_save_disable(void)                  { return 0; }
300 static inline int perf_counter_task_disable(void)       { return -EINVAL; }
301 static inline int perf_counter_task_enable(void)        { return -EINVAL; }
302 #endif
303
304 #endif /* __KERNEL__ */
305 #endif /* _LINUX_PERF_COUNTER_H */