/**
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
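
/*
 * Data flow, in terms of the code below: the interrupt-context entry
 * points (oprofile_add_sample() and friends) are the producers, writing
 * struct op_sample entries at head_pos; the per-CPU worker
 * wq_sync_buffer() periodically hands the buffer to sync_buffer(),
 * which drains entries from tail_pos into the global event buffer.
 */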

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);
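
/* Each CPU's buffer is drained by delayed work that re-arms itself
 * every DEFAULT_TIMER_EXPIRE jiffies (HZ / 10, i.e. roughly every
 * 100 ms) for as long as work_enabled is set. */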
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

void free_cpu_buffers(void)
{
        int i;

        for_each_online_cpu(i) {
                vfree(per_cpu(cpu_buffer, i).buffer);
                per_cpu(cpu_buffer, i).buffer = NULL;
        }
}

unsigned long oprofile_get_cpu_buffer_size(void)
{
        return fs_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
        struct oprofile_cpu_buffer *cpu_buf
                = &__get_cpu_var(cpu_buffer);

        cpu_buf->sample_lost_overflow++;
}

int alloc_cpu_buffers(void)
{
        int i;
        unsigned long buffer_size = fs_cpu_buffer_size;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
                                         cpu_to_node(i));
                if (!b->buffer)
                        goto fail;

                b->last_task = NULL;
                b->last_is_kernel = -1;
                b->tracing = 0;
                b->buffer_size = buffer_size;
                b->tail_pos = 0;
                b->head_pos = 0;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->backtrace_aborted = 0;
                b->sample_invalid_eip = 0;
                b->cpu = i;
                INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}

void start_cpu_work(void)
{
        int i;

        work_enabled = 1;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                /* Spread the work by 1 jiffy per cpu so they don't all
                 * fire at once. */
                schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
        }
}

void end_cpu_work(void)
{
        int i;

        work_enabled = 0;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                cancel_delayed_work(&b->work);
        }

        flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
{
        /* reset these to invalid values; the next sample
         * collected will populate the buffer with proper
         * values to initialize the buffer
         */
        cpu_buf->last_is_kernel = -1;
        cpu_buf->last_task = NULL;
}

/* compute number of available slots in cpu_buffer queue */
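/* For example, with buffer_size 8, head_pos 5 and tail_pos 2, slots
 * 5, 6, 7, 0 and 1 are unused; one slot is always kept empty so that a
 * full buffer is never confused with an empty one, giving
 * 2 + (8 - 5) - 1 = 4 available slots. */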
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
        unsigned long head = b->head_pos;
        unsigned long tail = b->tail_pos;

        if (tail > head)
                return (tail - head) - 1;

        return tail + (b->buffer_size - head) - 1;
}

static void increment_head(struct oprofile_cpu_buffer *b)
{
        unsigned long new_head = b->head_pos + 1;

        /* Ensure anything written to the slot before we
         * increment is visible */
        wmb();

        if (new_head < b->buffer_size)
                b->head_pos = new_head;
        else
                b->head_pos = 0;
}

static inline void
add_sample(struct oprofile_cpu_buffer *cpu_buf,
           unsigned long pc, unsigned long event)
{
        struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
        entry->eip = pc;
        entry->event = event;
        increment_head(cpu_buf);
}

static inline void
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
        add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the cpu buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
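/* Example of what one call can emit when both the kernel/user mode and
 * the current task have changed since the previous sample:
 *
 *      { eip = ESCAPE_CODE, event = is_kernel            }  mode switch
 *      { eip = ESCAPE_CODE, event = (unsigned long)task  }  task switch
 *      { eip = pc,          event = event                }  the sample
 *
 * which is why at least 3 free slots are required below.
 */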
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
                      int is_kernel, unsigned long event)
{
        struct task_struct *task;

        cpu_buf->sample_received++;

        if (pc == ESCAPE_CODE) {
                cpu_buf->sample_invalid_eip++;
                return 0;
        }

        if (nr_available_slots(cpu_buf) < 3) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        is_kernel = !!is_kernel;

        task = current;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_is_kernel != is_kernel) {
                cpu_buf->last_is_kernel = is_kernel;
                add_code(cpu_buf, is_kernel);
        }

        /* notice a task switch */
        if (cpu_buf->last_task != task) {
                cpu_buf->last_task = task;
                add_code(cpu_buf, (unsigned long)task);
        }

        add_sample(cpu_buf, pc, event);
        return 1;
}
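
/* A backtrace is stored as a CPU_TRACE_BEGIN escape record, the
 * originating sample (up to 3 slots, as above) and then the call-chain
 * PCs fed in through oprofile_add_trace() while cpu_buf->tracing is
 * set; hence the 4-slot headroom check below. */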
static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        if (nr_available_slots(cpu_buf) < 4) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        add_code(cpu_buf, CPU_TRACE_BEGIN);
        cpu_buf->tracing = 1;
        return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        cpu_buf->tracing = 0;
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs *const regs,
                             unsigned long event, int is_kernel)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        if (!backtrace_depth) {
                log_sample(cpu_buf, pc, is_kernel, event);
                return;
        }

        if (!oprofile_begin_trace(cpu_buf))
                return;

        /* if log_sample() fails we can't backtrace since we lost the source
         * of this event */
        if (log_sample(cpu_buf, pc, is_kernel, event))
                oprofile_ops.backtrace(regs, backtrace_depth);
        oprofile_end_trace(cpu_buf);
}

void oprofile_add_sample(struct pt_regs *const regs, unsigned long event)
{
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);

        oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
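
/* Typical caller (illustrative, not taken from this file): an
 * architecture's counter-overflow interrupt handler, passing its saved
 * registers and the counter/event number that fired, e.g.
 *
 *      oprofile_add_sample(regs, counter);
 */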

#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14
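
/* One IBS event is logged as an escape record carrying ibs_code,
 * followed by three eip/event pairs taken from ibs[0..5] (plus, for
 * IBS_OP_BEGIN, three more pairs from ibs[6..11]).  MAX_IBS_SAMPLE_SIZE
 * is an upper bound on the slots this can consume, including the
 * kernel/user and task switch escape records that may precede it. */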
static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
        unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
{
        struct task_struct *task;

        cpu_buf->sample_received++;

        if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        is_kernel = !!is_kernel;

        task = current;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_is_kernel != is_kernel) {
                cpu_buf->last_is_kernel = is_kernel;
                add_code(cpu_buf, is_kernel);
        }

        /* notice a task switch */
        if (cpu_buf->last_task != task) {
                cpu_buf->last_task = task;
                add_code(cpu_buf, (unsigned long)task);
        }

        add_code(cpu_buf, ibs_code);
        add_sample(cpu_buf, ibs[0], ibs[1]);
        add_sample(cpu_buf, ibs[2], ibs[3]);
        add_sample(cpu_buf, ibs[4], ibs[5]);

        if (ibs_code == IBS_OP_BEGIN) {
                add_sample(cpu_buf, ibs[6], ibs[7]);
                add_sample(cpu_buf, ibs[8], ibs[9]);
                add_sample(cpu_buf, ibs[10], ibs[11]);
        }

        return 1;
}

void oprofile_add_ibs_sample(struct pt_regs *const regs,
                             unsigned int *const ibs_sample, u8 code)
{
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);

        struct oprofile_cpu_buffer *cpu_buf =
                &per_cpu(cpu_buffer, smp_processor_id());

        if (!backtrace_depth) {
                log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
                return;
        }

        /* if log_sample() fails we can't backtrace since we lost the source
         * of this event */
        if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
                oprofile_ops.backtrace(regs, backtrace_depth);
}

#endif

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
        log_sample(cpu_buf, pc, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        if (!cpu_buf->tracing)
                return;

        if (nr_available_slots(cpu_buf) < 1) {
                cpu_buf->tracing = 0;
                cpu_buf->sample_lost_overflow++;
                return;
        }

        /* a broken frame can give an eip with the same value as an escape
         * code; abort the trace if we get it */
        if (pc == ESCAPE_CODE) {
                cpu_buf->tracing = 0;
                cpu_buf->backtrace_aborted++;
                return;
        }

        add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
        struct oprofile_cpu_buffer *b =
                container_of(work, struct oprofile_cpu_buffer, work.work);
        if (b->cpu != smp_processor_id()) {
                printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
                       smp_processor_id(), b->cpu);
        }
        sync_buffer(b->cpu);

        /* don't re-add the work if we're shutting down */
        if (work_enabled)
                schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}