/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
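
/*
 * One scratch buffer per recursion context: the index returned by
 * perf_swevent_get_recursion_context() picks among them, so events
 * firing in task, softirq, hardirq and NMI context never clobber
 * each other's data.
 */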
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
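
/*
 * Set up the state shared by all perf instances of one trace event:
 * the per-cpu hlist of active perf events and, for the first event
 * system-wide, the scratch buffers above. Called under event_mutex.
 */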
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
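
/*
 * Find the trace event matching the perf attr config (the ftrace event
 * id) and initialize it for perf use, pinning the owning module so it
 * cannot be unloaded while the event is live.
 */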
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
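
/*
 * Add the event to this CPU's list so the tracepoint callback, which
 * walks that list under RCU, starts delivering samples to it.
 */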
int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = this_cpu_ptr(list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}
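
/*
 * RCU removal: concurrent callbacks may still see the event until a
 * grace period elapses, hence the synchronization in
 * perf_trace_destroy() before anything is freed.
 */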
void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}
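
/*
 * Drop one reference on the trace event; the last one unregisters the
 * tracepoint callback and frees the per-cpu lists and buffers.
 */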
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
	mutex_unlock(&event_mutex);
}
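
/*
 * Reserve the scratch buffer for the current recursion context and
 * write the common trace entry header into it. Returns the buffer, or
 * NULL if this context is already recursing; the caller fills in the
 * event payload and hands rctx back via perf_trace_buf_submit().
 */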
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
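
/*
 * Usage sketch (not part of this file): a generated perf probe, e.g.
 * one of the perf_trace_##call() functions emitted by TRACE_EVENT(),
 * pairs the above with perf_trace_buf_submit() roughly as follows:
 *
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       regs, &rctx);
 *	if (!entry)
 *		return;
 *	... assign the event-specific fields of *entry ...
 *	head = this_cpu_ptr(event_call->perf_events);
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
 */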