/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
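/*
 * Rough data path, as a reading aid (derived from the code below):
 *
 *   interrupt/NMI handler
 *     -> oprofile_add_sample() / oprofile_add_ext_sample()
 *       -> log_sample()            writes into this CPU's buffer
 *   wq_sync_buffer()               delayed work, roughly every HZ/10
 *     -> sync_buffer()             drains into the global event buffer
 */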
#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
void free_cpu_buffers(void)
{
	int i;

	for_each_possible_cpu(i) {
		vfree(per_cpu(cpu_buffer, i).buffer);
		per_cpu(cpu_buffer, i).buffer = NULL;
	}
}
unsigned long oprofile_get_cpu_buffer_size(void)
{
	return fs_cpu_buffer_size;
}
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}
int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
		if (!b->buffer)
			goto fail;

		b->last_is_kernel = -1;
		b->buffer_size = buffer_size;

		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;

		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}
void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}
/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	if (tail > head)
		return (tail - head) - 1;

	return tail + (b->buffer_size - head) - 1;
}
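/*
 * Worked example of the arithmetic above (illustrative values): with
 * buffer_size = 8, head_pos = 5 and tail_pos = 2 the free region wraps
 * around the end of the buffer, so the free count is
 * tail + (buffer_size - head) - 1 = 2 + (8 - 5) - 1 = 4. One slot is
 * always left unused so a full buffer can be told apart from an empty one.
 */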
static inline void
add_sample(struct oprofile_cpu_buffer *cpu_buf,
	   unsigned long pc, unsigned long event)
{
	struct op_sample *entry = cpu_buffer_write_entry(cpu_buf);

	entry->eip = pc;
	entry->event = event;
	cpu_buffer_write_commit(cpu_buf);
}

static inline void
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
	add_sample(buffer, ESCAPE_CODE, value);
}
/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the cpu buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	add_sample(cpu_buf, pc, event);
	return 1;
}
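/*
 * Illustrative buffer contents after log_sample() records a sample that
 * also crosses a kernel/user boundary and a task switch (three slots,
 * which is why at least 3 free slots are required above):
 *
 *   { eip = ESCAPE_CODE, event = is_kernel           }
 *   { eip = ESCAPE_CODE, event = (unsigned long)task }
 *   { eip = pc,          event = event               }
 */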
static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	if (nr_available_slots(cpu_buf) < 4) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}
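/*
 * Note on oprofile_begin_trace() above: the 4-slot requirement most
 * likely accounts for the CPU_TRACE_BEGIN escape record plus the up to
 * three slots that the following log_sample() call may write.
 */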
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, backtrace_depth);
	oprofile_end_trace(cpu_buf);
}
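/*
 * Illustrative record sequence for a backtraced sample
 * (backtrace_depth > 0): oprofile_begin_trace() writes the
 * CPU_TRACE_BEGIN escape record, log_sample() writes the sample itself,
 * and the architecture's backtrace callback then adds one
 * oprofile_add_trace() entry per call-stack frame until the trace is
 * ended or aborted.
 */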
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14

void oprofile_add_ibs_sample(struct pt_regs * const regs,
			     unsigned int * const ibs_sample, int ibs_code)
{
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
		/* we can't backtrace since we lost the source of this event */
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;
		if (cpu_buf->last_task != task) {
			cpu_buf->last_task = task;
			add_code(cpu_buf, (unsigned long)task);
		}
	}

	add_code(cpu_buf, ibs_code);
	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
	}

	if (backtrace_depth)
		oprofile_ops.backtrace(regs, backtrace_depth);
}

#endif
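/*
 * Illustrative layout of one IBS record as written by
 * oprofile_add_ibs_sample() above: after the optional kernel/user and
 * task-switch escape records, an escape record carrying ibs_code is
 * followed by three pc/event pairs, or six pairs when ibs_code is
 * IBS_OP_BEGIN. The up-front check for MAX_IBS_SAMPLE_SIZE free slots
 * comfortably covers this worst case.
 */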
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	log_sample(cpu_buf, pc, is_kernel, event);
}
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
		return;
	}

	add_sample(cpu_buf, pc, 0);
}
/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}

	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
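/*
 * Minimal usage sketch (illustrative only, not part of this file): an
 * architecture's counter-overflow handler feeds this code through
 * oprofile_add_sample(). The handler name and the counter helpers below
 * are hypothetical; only oprofile_add_sample() is the real entry point.
 *
 *	static void example_counter_overflow_handler(struct pt_regs *regs)
 *	{
 *		int i;
 *
 *		for (i = 0; i < NUM_COUNTERS; ++i) {
 *			if (example_counter_overflowed(i)) {
 *				oprofile_add_sample(regs, i);
 *				example_reset_counter(i);
 *			}
 *		}
 *	}
 */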