1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
/*
 * trace_valid_entry - check whether one buffer entry looks well-formed.
 * NOTE(review): only the signature is visible in this listing; the body
 * that classifies entry types is missing here. Used below by
 * trace_test_buffer_cpu() to reject corrupted entries.
 */
6 static inline int trace_valid_entry(struct trace_entry *entry)
/*
 * trace_test_buffer_cpu - sanity-check one CPU's ring of trace pages.
 *
 * Walks every entry recorded for @data: the head page must be the first
 * page on the trace_pages list, each used entry must pass
 * trace_valid_entry(), and the page list must hold exactly enough pages
 * for tr->entries entries (neither short nor over-long).
 *
 * NOTE(review): this listing is fragmentary -- locals, return paths and
 * closing braces are not visible; comments below describe only what the
 * visible lines establish.
 */
17 trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
19 struct trace_entry *entries;
/* An empty page list means the per-cpu buffer was never allocated. */
24 BUG_ON(list_empty(&data->trace_pages));
25 page = list_entry(data->trace_pages.next, struct page, lru);
26 entries = page_address(page);
/* The head page must coincide with the first page of trace_pages. */
28 if (head_page(data) != entries)
32 * The starting trace buffer always has valid elements,
33 * if any element exists.
35 entries = head_page(data);
/* Validate every slot up to the configured buffer size. */
37 for (i = 0; i < tr->entries; i++) {
/* Only slots below trace_idx hold recorded entries; check each. */
39 if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
40 printk(KERN_CONT ".. invalid entry %d ",
/* Crossed a page boundary: advance to the next page in the ring. */
46 if (idx >= ENTRIES_PER_PAGE) {
47 page = virt_to_page(entries);
/* Wrapped back to the list head before i reached tr->entries - 1:
 * the page list is shorter than the buffer claims. */
48 if (page->lru.next == &data->trace_pages) {
49 if (i != tr->entries - 1) {
50 printk(KERN_CONT ".. entries buffer mismatch");
54 page = list_entry(page->lru.next, struct page, lru);
55 entries = page_address(page);
/* After the walk, the next page must be the list head; otherwise the
 * ring contains more pages than tr->entries accounts for. */
61 page = virt_to_page(entries);
62 if (page->lru.next != &data->trace_pages) {
63 printk(KERN_CONT ".. too many entries");
/* NOTE(review): presumably a common failure label reached via goto
 * from the checks above -- the goto statements are not visible here. */
70 printk(KERN_CONT ".. corrupted trace buffer .. ");
75 * Test the trace buffer to see if all the elements
/*
 * trace_test_buffer - validate every CPU's buffer and count entries.
 * @tr:    trace array to check
 * @count: if non-NULL, receives the total number of entries found
 *         (callers pass NULL when only validity matters).
 *
 * Returns the result of the per-cpu checks; CPUs whose buffer has no
 * head page (never written) are skipped.
 * NOTE(review): locking, the loop break on error, and the final
 * "*count = cnt" store are outside this fragmentary view.
 */
78 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
80 unsigned long cnt = 0;
84 for_each_possible_cpu(cpu) {
/* Skip CPUs that never recorded anything. */
85 if (!head_page(tr->data[cpu]))
88 cnt += tr->data[cpu]->trace_idx;
90 ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
103 * Simple verification test of ftrace function tracer.
104 * Enable ftrace, sleep 1/10 second, and then read the trace
105 * buffer to see if all is in order.
/*
 * Returns 0 on success, non-zero on failure. The empty-buffer case is
 * treated as failure because a running function tracer must record
 * something within 100ms.
 * NOTE(review): the tracer start/stop calls, msleep, and return paths
 * fall in the missing lines of this listing.
 */
108 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* Force the ftraced daemon to record available functions first. */
113 /* make sure functions have been recorded */
114 ret = ftrace_force_update();
116 printk(KERN_CONT ".. ftraced failed .. ");
120 /* start the tracing */
125 /* Sleep for a 1/10 of a second */
127 /* stop the tracing. */
129 trace->ctrl_update(tr);
132 /* check the trace buffer */
133 ret = trace_test_buffer(tr, &count);
/* A valid but empty buffer still fails the selftest. */
136 if (!ret && !count) {
137 printk(KERN_CONT ".. no entries found ..");
143 #endif /* CONFIG_FTRACE */
145 #ifdef CONFIG_IRQSOFF_TRACER
/*
 * trace_selftest_startup_irqsoff - verify the irqs-off latency tracer.
 * Disables interrupts briefly with tracing enabled, then checks that
 * both the live buffer and the max-latency snapshot contain entries.
 * tracing_max_latency is saved on entry and restored on exit so the
 * selftest does not clobber a real recorded maximum.
 * NOTE(review): the ctrl_update(start), local_irq_disable/enable pair
 * and returns are in lines missing from this listing.
 */
147 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
149 unsigned long save_max = tracing_max_latency;
153 /* start the tracing */
/* Zero the threshold so any irqs-off window is captured. */
156 /* reset the max latency */
157 tracing_max_latency = 0;
158 /* disable interrupts for a bit */
162 /* stop the tracing. */
164 trace->ctrl_update(tr);
165 /* check both trace buffers */
166 ret = trace_test_buffer(tr, NULL);
168 ret = trace_test_buffer(&max_tr, &count);
/* An empty max-latency snapshot means no irqs-off window was traced. */
171 if (!ret && !count) {
172 printk(KERN_CONT ".. no entries found ..");
176 tracing_max_latency = save_max;
180 #endif /* CONFIG_IRQSOFF_TRACER */
182 #ifdef CONFIG_PREEMPT_TRACER
/*
 * trace_selftest_startup_preemptoff - verify the preempt-off tracer.
 * Same pattern as the irqsoff selftest, but the traced critical section
 * disables preemption instead of interrupts. tracing_max_latency is
 * saved/restored around the test.
 * NOTE(review): the preempt_disable/enable pair and return paths are in
 * lines missing from this listing.
 */
184 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
186 unsigned long save_max = tracing_max_latency;
190 /* start the tracing */
/* Zero the threshold so any preempt-off window is captured. */
193 /* reset the max latency */
194 tracing_max_latency = 0;
195 /* disable preemption for a bit */
199 /* stop the tracing. */
201 trace->ctrl_update(tr);
202 /* check both trace buffers */
203 ret = trace_test_buffer(tr, NULL);
205 ret = trace_test_buffer(&max_tr, &count);
/* An empty max-latency snapshot means no preempt-off window traced. */
208 if (!ret && !count) {
209 printk(KERN_CONT ".. no entries found ..");
213 tracing_max_latency = save_max;
217 #endif /* CONFIG_PREEMPT_TRACER */
219 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * trace_selftest_startup_preemptirqsoff - verify the combined
 * preempt+irqs-off tracer. Runs the critical section twice: once
 * nesting irq-disable inside preempt-disable, then once with the
 * enable/disable order reversed, checking the live and max-latency
 * buffers after each pass. tracing_max_latency is saved/restored.
 * NOTE(review): the actual disable/enable calls and return paths are
 * in lines missing from this listing.
 */
221 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
223 unsigned long save_max = tracing_max_latency;
227 /* start the tracing */
231 /* reset the max latency */
232 tracing_max_latency = 0;
/* Pass 1: preemption disabled first, then interrupts. */
234 /* disable preemption and interrupts for a bit */
239 /* reverse the order of preempt vs irqs */
242 /* stop the tracing. */
244 trace->ctrl_update(tr);
245 /* check both trace buffers */
246 ret = trace_test_buffer(tr, NULL);
250 ret = trace_test_buffer(&max_tr, &count);
254 if (!ret && !count) {
255 printk(KERN_CONT ".. no entries found ..");
/* Pass 2: same test, interrupts disabled first this time. */
260 /* do the test by disabling interrupts first this time */
261 tracing_max_latency = 0;
263 trace->ctrl_update(tr);
268 /* reverse the order of preempt vs irqs */
271 /* stop the tracing. */
273 trace->ctrl_update(tr);
274 /* check both trace buffers */
275 ret = trace_test_buffer(tr, NULL);
279 ret = trace_test_buffer(&max_tr, &count);
281 if (!ret && !count) {
282 printk(KERN_CONT ".. no entries found ..");
289 tracing_max_latency = save_max;
293 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
295 #ifdef CONFIG_SCHED_TRACER
/*
 * trace_wakeup_test_thread - kthread body for the wakeup-latency test.
 * @data: completion the creator waits on; signalled once this thread
 *        has raised itself to RT priority (the complete() call itself
 *        is in a line missing from this listing).
 *
 * Boosts itself to an RT priority, sleeps, and lets the selftest wake
 * it so the wakeup tracer has an RT wakeup to measure; then idles in
 * short sleeps until kthread_stop() is called.
 */
296 static int trace_wakeup_test_thread(void *data)
298 struct completion *x = data;
300 /* Make this a RT thread, doesn't need to be too high */
302 rt_mutex_setprio(current, MAX_RT_PRIO - 5);
/* NOTE(review): presumably complete(x) here -- line not visible. */
304 /* Make it know we have a new prio */
307 /* now go to sleep and let the test wake us up */
308 set_current_state(TASK_INTERRUPTIBLE);
311 /* we are awake, now wait to disappear */
312 while (!kthread_should_stop()) {
314 * This is an RT task, do short sleeps to let
/*
 * trace_selftest_startup_wakeup - verify the RT-wakeup latency tracer.
 * Creates an RT kthread, waits for it to reach RT priority, starts the
 * tracer, wakes the sleeping thread, and checks both the live buffer
 * and the max-latency snapshot for recorded wakeup entries.
 * tracing_max_latency is saved/restored around the test.
 * NOTE(review): the wake_up_process() call, msleep, and kthread_stop()
 * fall in lines missing from this listing.
 */
324 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
326 unsigned long save_max = tracing_max_latency;
327 struct task_struct *p;
328 struct completion isrt;
332 init_completion(&isrt);
334 /* create a high prio thread */
335 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
337 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
/* Block until the kthread has boosted itself to RT priority. */
341 /* make sure the thread is running at an RT prio */
342 wait_for_completion(&isrt);
344 /* start the tracing */
/* Zero the threshold so any wakeup latency is captured. */
347 /* reset the max latency */
348 tracing_max_latency = 0;
350 /* sleep to let the RT thread sleep too */
354 * Yes this is slightly racy. It is possible that for some
355 * strange reason that the RT thread we created, did not
356 * call schedule for 100ms after doing the completion,
357 * and we do a wakeup on a task that already is awake.
358 * But that is extremely unlikely, and the worst thing that
359 * happens in such a case, is that we disable tracing.
360 * Honestly, if this race does happen something is horrible
361 * wrong with the system.
366 /* stop the tracing. */
368 trace->ctrl_update(tr);
369 /* check both trace buffers */
370 ret = trace_test_buffer(tr, NULL);
372 ret = trace_test_buffer(&max_tr, &count);
/* Restore before stopping the thread so no later window overwrites. */
377 tracing_max_latency = save_max;
379 /* kill the thread */
382 if (!ret && !count) {
383 printk(KERN_CONT ".. no entries found ..");
389 #endif /* CONFIG_SCHED_TRACER */
391 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * trace_selftest_startup_sched_switch - verify the context-switch
 * tracer. Starts tracing, sleeps 100ms so scheduling activity occurs,
 * stops tracing, and requires at least one recorded entry.
 * NOTE(review): the ctrl_update(start) call, msleep, and return paths
 * are in lines missing from this listing.
 */
393 trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
398 /* start the tracing */
401 /* Sleep for a 1/10 of a second */
403 /* stop the tracing. */
405 trace->ctrl_update(tr);
406 /* check the trace buffer */
407 ret = trace_test_buffer(tr, &count);
/* A valid but empty buffer still fails the selftest. */
410 if (!ret && !count) {
411 printk(KERN_CONT ".. no entries found ..");
417 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
419 #ifdef CONFIG_DYNAMIC_FTRACE
420 #endif /* CONFIG_DYNAMIC_FTRACE */