return -1;
}
+ /*
+ * When this gets called we hold the BKL which means that
+ * preemption is disabled. Various trace selftests however
+ * need to disable and enable preemption for successful tests.
+ * So we drop the BKL here and grab it after the tests again.
+ */
+ unlock_kernel();
mutex_lock(&trace_types_lock);
+
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n", type->name);
			ret = -1;
			goto out;
		}
	}
#ifdef CONFIG_FTRACE_STARTUP_TEST
- /*
- * When this gets called we hold the BKL which means that preemption
- * is disabled. Various trace selftests however need to disable
- * and enable preemption for successful tests. So we drop the BKL here
- * and grab it after the tests again.
- */
- unlock_kernel();
if (type->selftest) {
struct tracer *saved_tracer = current_trace;
struct trace_array *tr = &global_trace;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
- for_each_tracing_cpu(i) {
+ for_each_tracing_cpu(i)
tracing_reset(tr, i);
- }
+
current_trace = type;
/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
goto out;
}
/* Only reset on passing, to avoid touching corrupted buffers */
- for_each_tracing_cpu(i) {
+ for_each_tracing_cpu(i)
tracing_reset(tr, i);
- }
+
printk(KERN_CONT "PASSED\n");
}
- lock_kernel();
#endif
	type->next = trace_types;
	trace_types = type;
out:
mutex_unlock(&trace_types_lock);
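+	/* reacquire the BKL only after trace_types_lock is released */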
+ lock_kernel();
return ret;
}
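
For reference, a minimal sketch (not part of the patch itself) of the locking
order this change establishes in register_tracer(). All identifiers are taken
from the code above; the registration steps elided in the hunks are summarized
as a single comment:

	unlock_kernel();		/* drop the BKL: selftests may enable/disable preemption */
	mutex_lock(&trace_types_lock);	/* serialize access to the trace_types list */

	/* duplicate-name check, optional startup selftest, list insertion */

	mutex_unlock(&trace_types_lock);
	lock_kernel();			/* reacquire the BKL the caller expects to hold */
	return ret;

The key point is that the BKL is dropped before trace_types_lock is taken and
retaken only after it is released, so the selftests run with preemption
available and the two locks are never held in inverted order.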