git.karo-electronics.de Git - mv-sheeva.git/blob - include/linux/perf_counter.h
perf_counter: Add support for pinned and exclusive counter groups
[mv-sheeva.git] / include / linux / perf_counter.h
1 /*
2  *  Performance counters:
3  *
4  *   Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
5  *   Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
6  *
7  *  Data type definitions, declarations, prototypes.
8  *
9  *  Started by: Thomas Gleixner and Ingo Molnar
10  *
11  *  For licencing details see kernel-base/COPYING
12  */
13 #ifndef _LINUX_PERF_COUNTER_H
14 #define _LINUX_PERF_COUNTER_H
15
16 #include <asm/atomic.h>
17
18 #ifdef CONFIG_PERF_COUNTERS
19 # include <asm/perf_counter.h>
20 #endif
21
22 #include <linux/list.h>
23 #include <linux/mutex.h>
24 #include <linux/rculist.h>
25 #include <linux/rcupdate.h>
26 #include <linux/spinlock.h>
27
28 struct task_struct;
29
30 /*
31  * User-space ABI bits:
32  */
33
34 /*
35  * Generalized performance counter event types, used by the hw_event.type
36  * parameter of the sys_perf_counter_open() syscall:
37  */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		=  0,
	PERF_COUNT_INSTRUCTIONS		=  1,
	PERF_COUNT_CACHE_REFERENCES	=  2,
	PERF_COUNT_CACHE_MISSES		=  3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	=  4,
	PERF_COUNT_BRANCH_MISSES	=  5,
	PERF_COUNT_BUS_CYCLES		=  6,

	/* Number of generalized hardware events above -- not a real event type: */
	PERF_HW_EVENTS_MAX		=  7,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and sw events of the
	 * kernel (and allow the profiling of them as well):
	 *
	 * Software events use negative values on purpose: the sign of
	 * hw_event.type is what is_software_counter() tests.
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	PERF_COUNT_PAGE_FAULTS		= -3,
	PERF_COUNT_CONTEXT_SWITCHES	= -4,
	PERF_COUNT_CPU_MIGRATIONS	= -5,

	/* Exclusive lower bound of the software event range -- not a real event type: */
	PERF_SW_EVENTS_MIN		= -6,
};
66
67 /*
68  * IRQ-notification data record type:
69  */
/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE		=  0,	/* no payload recorded -- presumably wakeup only; confirm */
	PERF_RECORD_IRQ			=  1,	/* record data on each counter IRQ -- TODO confirm payload */
	PERF_RECORD_GROUP		=  2,	/* record values of the whole counter group -- confirm */
};
75
/*
 * Hardware event to monitor via a performance monitoring counter:
 *
 * NOTE: this struct is part of the user-space ABI of
 * sys_perf_counter_open() -- its layout must not change.
 */
struct perf_counter_hw_event {
	/*
	 * Event to count: an hw_event_types value (signed, since software
	 * events are negative), or a raw hardware event code when ->raw
	 * is set.
	 */
	s64			type;

	/* events between IRQ samples -- presumably; mirrored into hw_perf_counter::irq_period */
	u64			irq_period;
	/* a perf_counter_record_type value */
	u32			record_type;

	u32			disabled     :  1, /* off by default      */
				nmi	     :  1, /* NMI sampling        */
				raw	     :  1, /* raw event type      */
				inherit      :  1, /* children inherit it */
				pinned	     :  1, /* must always be on PMU */
				exclusive    :  1, /* only counter on PMU */

				__reserved_1 : 26;

	u64			__reserved_2;
};
96
97 /*
98  * Kernel-internal data types:
99  */
100
/**
 * struct hw_perf_counter - performance counter hardware details:
 *
 * Arch-specific state; empty when CONFIG_PERF_COUNTERS is off.
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	u64				config;		/* hw-specific event configuration value */
	unsigned long			config_base;	/* base of the config register -- arch-defined; confirm */
	unsigned long			counter_base;	/* base of the count register -- arch-defined; confirm */
	int				nmi;		/* sample via NMI -- presumably mirrors hw_event.nmi */
	unsigned int			idx;		/* hardware counter index */
	atomic64_t			prev_count;	/* last hw value read, for delta accumulation -- confirm */
	u64				irq_period;	/* copy of hw_event.irq_period -- confirm */
	atomic64_t			period_left;	/* events remaining until next sample IRQ -- confirm */
#endif
};
116
/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN		2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 *
 * Two of these back each counter (perf_counter::data[2]): one filled
 * from IRQ context, one drained by user space.
 */
struct perf_data {
	int				len;		/* bytes of valid data in data[] */
	int				rd_idx;		/* read cursor -- presumably next offset consumed by read() */
	int				overrun;	/* overrun indicator -- semantics not visible here; confirm */
	u8				data[PERF_DATA_BUFLEN];
};
131
132 struct perf_counter;
133
/**
 * struct hw_perf_counter_ops - performance counter hw ops
 *
 * Back-end methods for a counter, obtained via hw_perf_counter_init().
 */
struct hw_perf_counter_ops {
	int (*enable)			(struct perf_counter *counter);	/* start counting; int return -- error code, presumably */
	void (*disable)			(struct perf_counter *counter);	/* stop counting */
	void (*read)			(struct perf_counter *counter);	/* refresh counter->count from hardware -- confirm */
};
142
/**
 * enum perf_counter_active_state - the states of a counter
 *
 * Negative states mean the counter is not counting and will not be
 * scheduled in; see PERF_COUNTER_STATE_ERROR for the unrecoverable case.
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,	/* could not be scheduled -- presumably pinned counter that lost its PMU slot; confirm */
	PERF_COUNTER_STATE_OFF		= -1,	/* administratively disabled */
	PERF_COUNTER_STATE_INACTIVE	=  0,	/* enabled but not currently on the PMU */
	PERF_COUNTER_STATE_ACTIVE	=  1,	/* currently counting on the PMU */
};
152
153 struct file;
154
/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;	/* entry in ctx->counter_list */
	struct list_head		sibling_list;	/* group siblings -- presumably anchored at the leader; confirm */
	struct perf_counter		*group_leader;	/* leader of this counter's group */
	const struct hw_perf_counter_ops *hw_ops;	/* back-end ops, from hw_perf_counter_init() */

	enum perf_counter_active_state	state;		/* see perf_counter_active_state */
	atomic64_t			count;		/* accumulated counter value */

	struct perf_counter_hw_event	hw_event;	/* user-supplied event description */
	struct hw_perf_counter		hw;		/* arch-specific hardware state */

	struct perf_counter_context	*ctx;		/* context this counter is attached to */
	struct task_struct		*task;		/* monitored task -- presumably NULL for per-CPU counters; confirm */
	struct file			*filp;		/* file backing the counter fd -- confirm lifetime rules */

	struct perf_counter		*parent;	/* counter this one was inherited from (hw_event.inherit) -- confirm */
	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;		/* CPU the counter is active on -- presumably -1 when inactive; confirm */
	int				cpu;		/* CPU binding requested at open time -- confirm */

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;	/* buffer currently filled from IRQ context */
	struct perf_data		*usrdata;	/* buffer currently drained by user space */
	struct perf_data		data[2];	/* storage double-buffer behind irqdata/usrdata */
#endif
};
193
/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;

	struct list_head	counter_list;	/* all counters in this context (perf_counter::list_entry) */
	int			nr_counters;	/* number of counters on counter_list */
	int			nr_active;	/* counters currently in PERF_COUNTER_STATE_ACTIVE -- confirm */
	struct task_struct	*task;		/* owning task -- presumably NULL for a per-CPU context; confirm */
#endif
};
212
/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;		/* counters bound to this CPU */
	struct perf_counter_context	*task_ctx;	/* context of the task currently scheduled in -- confirm */
	int				active_oncpu;	/* counters currently on this CPU's PMU -- confirm */
	int				max_pertask;	/* limit of per-task counters on this CPU -- confirm */
	int				exclusive;	/* an exclusive counter (hw_event.exclusive) holds the PMU -- confirm */
};
223
/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
/* Obtain the back-end ops for a counter -- error convention not visible here; confirm */
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

/* Scheduler hooks: move a task's counters on/off a CPU, and rotate on tick */
extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
/* fork()/exit() hooks -- presumably implement hw_event.inherit; confirm */
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
/* Disable all counters; returns prior state to pass back to hw_perf_restore() */
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
/* Schedule a whole counter group onto the PMU as a unit -- confirm arch-overridable */
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu);
247
248 /*
249  * Return 1 for a software counter, 0 for a hardware counter
250  */
251 static inline int is_software_counter(struct perf_counter *counter)
252 {
253         return !counter->hw_event.raw && counter->hw_event.type < 0;
254 }
255
256 #else
257 static inline void
258 perf_counter_task_sched_in(struct task_struct *task, int cpu)           { }
259 static inline void
260 perf_counter_task_sched_out(struct task_struct *task, int cpu)          { }
261 static inline void
262 perf_counter_task_tick(struct task_struct *task, int cpu)               { }
263 static inline void perf_counter_init_task(struct task_struct *child)    { }
264 static inline void perf_counter_exit_task(struct task_struct *child)    { }
265 static inline void perf_counter_notify(struct pt_regs *regs)            { }
266 static inline void perf_counter_print_debug(void)                       { }
267 static inline void hw_perf_restore(u64 ctrl)                    { }
268 static inline u64 hw_perf_save_disable(void)                  { return 0; }
269 static inline int perf_counter_task_disable(void)       { return -EINVAL; }
270 static inline int perf_counter_task_enable(void)        { return -EINVAL; }
271 #endif
272
273 #endif /* _LINUX_PERF_COUNTER_H */