/*
 *  Performance counters:
 *
 *   Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>
#include <asm/perf_counter.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_CYCLES               =  0,
        PERF_COUNT_INSTRUCTIONS         =  1,
        PERF_COUNT_CACHE_REFERENCES     =  2,
        PERF_COUNT_CACHE_MISSES         =  3,
        PERF_COUNT_BRANCH_INSTRUCTIONS  =  4,
        PERF_COUNT_BRANCH_MISSES        =  5,

        PERF_HW_EVENTS_MAX              =  6,

        /*
         * Special "software" counters provided by the kernel, even if
         * the hardware does not support performance counters. These
         * counters measure various physical and sw events of the
         * kernel (and allow the profiling of them as well):
         */
        PERF_COUNT_CPU_CLOCK            = -1,
        PERF_COUNT_TASK_CLOCK           = -2,
        PERF_COUNT_PAGE_FAULTS          = -3,
        PERF_COUNT_CONTEXT_SWITCHES     = -4,
        PERF_COUNT_CPU_MIGRATIONS       = -5,

        PERF_SW_EVENTS_MIN              = -6,
};
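
/*
 * Illustrative note (not part of this header): the sign of hw_event.type
 * separates the two ranges above. A sketch of the distinction the core
 * code is expected to make:
 *
 *        if (type >= 0 && type < PERF_HW_EVENTS_MAX)
 *                ... generalized hardware event ...
 *        else if (type < 0 && type > PERF_SW_EVENTS_MIN)
 *                ... kernel-provided software counter ...
 */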

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
        PERF_RECORD_SIMPLE              =  0,
        PERF_RECORD_IRQ                 =  1,
        PERF_RECORD_GROUP               =  2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
        s64                     type;

        u64                     irq_period;
        u32                     record_type;

        u32                     disabled     :  1, /* off by default      */
                                nmi          :  1, /* NMI sampling        */
                                raw          :  1, /* raw event type      */
                                inherit      :  1, /* children inherit it */
                                __reserved_1 : 28;

        u64                     __reserved_2;
};
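
/*
 * Illustrative example (not part of the ABI definition above): a counter
 * that samples every 100000 instructions and pushes IRQ records could be
 * described roughly like this before being passed to the
 * sys_perf_counter_open() syscall mentioned above:
 *
 *        struct perf_counter_hw_event hw_event = {
 *                .type           = PERF_COUNT_INSTRUCTIONS,
 *                .irq_period     = 100000,
 *                .record_type    = PERF_RECORD_IRQ,
 *                .nmi            = 1,
 *        };
 */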

/*
 * Kernel-internal data types:
 */

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
        u64                             config;
        unsigned long                   config_base;
        unsigned long                   counter_base;
        int                             nmi;
        unsigned int                    idx;
        atomic64_t                      prev_count;
        u64                             irq_period;
        atomic64_t                      period_left;
#endif
};

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN                2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
        int                             len;
        int                             rd_idx;
        int                             overrun;
        u8                              data[PERF_DATA_BUFLEN];
};
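
/*
 * Illustrative sketch (not part of this header, names approximated for
 * documentation): IRQ-time samples are appended into the counter's current
 * irqdata buffer along these lines, with overruns counted once the buffer
 * is full:
 *
 *        static void store_irq_sample(struct perf_data *irqdata, u64 value)
 *        {
 *                if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
 *                        irqdata->overrun++;
 *                } else {
 *                        *(u64 *)&irqdata->data[irqdata->len] = value;
 *                        irqdata->len += sizeof(u64);
 *                }
 *        }
 */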

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
        int (*enable)                   (struct perf_counter *counter);
        void (*disable)                 (struct perf_counter *counter);
        void (*read)                    (struct perf_counter *counter);
};
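
/*
 * Illustrative sketch (not part of this header): an architecture backend
 * implements these three hooks and returns its ops structure from
 * hw_perf_counter_init(), declared further below. The names here are made
 * up for the example:
 *
 *        static const struct hw_perf_counter_ops my_pmu_ops = {
 *                .enable         = my_pmu_enable,
 *                .disable        = my_pmu_disable,
 *                .read           = my_pmu_read,
 *        };
 */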

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
        PERF_COUNTER_STATE_OFF          = -1,
        PERF_COUNTER_STATE_INACTIVE     =  0,
        PERF_COUNTER_STATE_ACTIVE       =  1,
};

struct file;

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
        struct list_head                list_entry;
        struct list_head                sibling_list;
        struct perf_counter             *group_leader;
        const struct hw_perf_counter_ops *hw_ops;

        enum perf_counter_active_state  state;
        atomic64_t                      count;

        struct perf_counter_hw_event    hw_event;
        struct hw_perf_counter          hw;

        struct perf_counter_context     *ctx;
        struct task_struct              *task;
        struct file                     *filp;

        unsigned int                    nr_inherited;
        struct perf_counter             *parent;
        /*
         * Protect attach/detach:
         */
        struct mutex                    mutex;

        int                             oncpu;
        int                             cpu;

        /* read() / irq related data */
        wait_queue_head_t               waitq;
        /* optional: for NMIs */
        int                             wakeup_pending;
        struct perf_data                *irqdata;
        struct perf_data                *usrdata;
        struct perf_data                data[2];
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
        /*
         * Protect the list of counters:
         */
        spinlock_t              lock;

        struct list_head        counter_list;
        int                     nr_counters;
        int                     nr_active;
        struct task_struct      *task;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
        struct perf_counter_context     ctx;
        struct perf_counter_context     *task_ctx;
        int                             active_oncpu;
        int                             max_pertask;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)           { }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)          { }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)               { }
static inline void perf_counter_init_task(struct task_struct *child)    { }
static inline void perf_counter_exit_task(struct task_struct *child)    { }
static inline void perf_counter_notify(struct pt_regs *regs)            { }
static inline void perf_counter_print_debug(void)                       { }
static inline void hw_perf_restore(u64 ctrl)                            { }
static inline u64 hw_perf_save_disable(void)                  { return 0; }
static inline int perf_counter_task_disable(void)       { return -EINVAL; }
static inline int perf_counter_task_enable(void)        { return -EINVAL; }
#endif
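
/*
 * Illustrative usage sketch (not part of this header): hw_perf_save_disable()
 * and hw_perf_restore() are meant to bracket sections that manipulate counter
 * state with all counters disabled, roughly:
 *
 *        u64 perf_flags;
 *
 *        perf_flags = hw_perf_save_disable();
 *        ... modify counter/context state ...
 *        hw_perf_restore(perf_flags);
 */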

#endif /* _LINUX_PERF_COUNTER_H */