/*
 * @file op_model_ppro.c
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

static int num_counters = 2;
static int counter_width = 32;

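/*
 * Bits of the event-select MSR that oprofile must leave untouched:
 * the upper 32 bits and reserved bit 21. ppro_setup_ctrs() keeps
 * these bits from the current MSR value and rewrites all others.
 */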
#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL << 32) | (1ULL << 21))

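/*
 * Per-counter reload value, in events; zero marks a counter as
 * inactive. NULL until allocated in ppro_setup_ctrs().
 */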
static u64 *reset_value;

static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
        int i;

        for (i = 0; i < num_counters; i++) {
                if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
                        continue;
                if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
                        release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
                        continue;
                }
                /* both registers must be reserved */
                msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
                msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
        }
}

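/*
 * Counters whose MSRs could not be reserved above keep addr == 0;
 * ppro_setup_ctrs() skips them and warns (on cpu #0 only) if such a
 * counter was requested.
 */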
static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
                            struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        if (!reset_value) {
                reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
                                      GFP_ATOMIC);
                if (!reset_value)
                        return;
        }

        if (cpu_has_arch_perfmon) {
                union cpuid10_eax eax;
                eax.full = cpuid_eax(0xa);

                /*
                 * For Core2 (family 6, model 15), don't reset the
                 * counter width:
                 */
                if (!(eax.split.version_id == 0 &&
                      current_cpu_data.x86 == 6 &&
                      current_cpu_data.x86_model == 15)) {
                        if (counter_width < eax.split.bit_width)
                                counter_width = eax.split.bit_width;
                }
        }

        /* clear all counters */
        for (i = 0; i < num_counters; ++i) {
                if (unlikely(!msrs->controls[i].addr)) {
                        if (counter_config[i].enabled && !smp_processor_id())
                                /*
                                 * counter is reserved, this is on all
                                 * cpus, so report only for cpu #0
                                 */
                                op_x86_warn_reserved(i);
                        continue;
                }
                rdmsrl(msrs->controls[i].addr, val);
                if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
                        op_x86_warn_in_use(i);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
                /*
                 * Write -1 so the counter's top bit stays set: the NMI
                 * handler treats a clear top bit as an overflow, so this
                 * avoids a false detection of ctr overflows.
                 */
                wrmsrl(msrs->counters[i].addr, -1LL);
        }

        /* enable active counters */
        for (i = 0; i < num_counters; ++i) {
                if (counter_config[i].enabled && msrs->counters[i].addr) {
                        reset_value[i] = counter_config[i].count;
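                        /*
                         * The counter counts upward and raises its PMI
                         * when it wraps through zero, so seed it with
                         * -count to get an interrupt after "count" events.
                         */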
                        wrmsrl(msrs->counters[i].addr, -reset_value[i]);
                        rdmsrl(msrs->controls[i].addr, val);
                        val &= model->reserved;
                        val |= op_x86_get_ctrl(model, &counter_config[i]);
                        wrmsrl(msrs->controls[i].addr, val);
                } else {
                        reset_value[i] = 0;
                }
        }
}

static int ppro_check_ctrs(struct pt_regs * const regs,
                           struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        /*
         * This can happen if perf counters are in use when
         * we steal the die notifier NMI.
         */
        if (unlikely(!reset_value))
                goto out;

        for (i = 0; i < num_counters; ++i) {
                if (!reset_value[i])
                        continue;
                rdmsrl(msrs->counters[i].addr, val);
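                /*
                 * The counter was seeded with -reset_value and counts
                 * up; while its top implemented bit (counter_width - 1)
                 * is still set it has not yet wrapped through zero,
                 * i.e. it has not overflowed.
                 */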
                if (val & (1ULL << (counter_width - 1)))
                        continue;
                oprofile_add_sample(regs, i);
                wrmsrl(msrs->counters[i].addr, -reset_value[i]);
        }

out:
        /*
         * Only P6 based Pentium M need to re-unmask the apic vector, but
         * it doesn't hurt other P6 variants.
         */
        apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

        /*
         * We can't tell whether we really handled an interrupt. We might
         * have caught a *second* counter's overflow while handling this
         * one; when that counter's interrupt then arrives we find no
         * counter that has overflowed, would return 0, and the NMI would
         * be flagged as spurious (dazed + confused). Instead we always
         * claim we found an overflow. This sucks.
         */
        return 1;
}

static void ppro_start(struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        if (!reset_value)
                return;
        for (i = 0; i < num_counters; ++i) {
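                /*
                 * reset_value[i] is nonzero only for counters that
                 * ppro_setup_ctrs() actually enabled.
                 */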
                if (reset_value[i]) {
                        rdmsrl(msrs->controls[i].addr, val);
                        val |= ARCH_PERFMON_EVENTSEL_ENABLE;
                        wrmsrl(msrs->controls[i].addr, val);
                }
        }
}

static void ppro_stop(struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        if (!reset_value)
                return;
        for (i = 0; i < num_counters; ++i) {
                if (!reset_value[i])
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(msrs->controls[i].addr, val);
        }
}

static void ppro_shutdown(struct op_msrs const * const msrs)
{
        int i;

        for (i = 0; i < num_counters; ++i) {
                if (!msrs->counters[i].addr)
                        continue;
                release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
                release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
        }
        if (reset_value) {
                kfree(reset_value);
                reset_value = NULL;
        }
}

struct op_x86_model_spec op_ppro_spec = {
        .num_counters           = 2,
        .num_controls           = 2,
        .reserved               = MSR_PPRO_EVENTSEL_RESERVED,
        .fill_in_addresses      = &ppro_fill_in_addresses,
        .setup_ctrs             = &ppro_setup_ctrs,
        .check_ctrs             = &ppro_check_ctrs,
        .start                  = &ppro_start,
        .stop                   = &ppro_stop,
        .shutdown               = &ppro_shutdown
};

/*
 * Architectural performance monitoring.
 *
 * Newer Intel CPUs (Core1+) have support for architectural
 * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
 * The advantage of this is that it can be done without knowing about
 * the specific CPU.
 */

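/*
 * CPUID 0xA describes the facility in EAX: bits 0-7 hold the perfmon
 * version, bits 8-15 the number of general-purpose counters, and
 * bits 16-23 the counter bit width; union cpuid10_eax decodes them.
 */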
static void arch_perfmon_setup_counters(void)
{
        union cpuid10_eax eax;

        eax.full = cpuid_eax(0xa);

        /*
         * Workaround for BIOS bugs in 6/15 (Core2) that leave the
         * CPUID 0xA version field zeroed: assume the documented Core2
         * values instead. Taken from perfmon2.
         */
        if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
            current_cpu_data.x86_model == 15) {
                eax.split.version_id = 2;
                eax.split.num_events = 2;
                eax.split.bit_width = 40;
        }

        num_counters = eax.split.num_events;

        op_arch_perfmon_spec.num_counters = num_counters;
        op_arch_perfmon_spec.num_controls = num_counters;
}

static int arch_perfmon_init(struct oprofile_operations *ignore)
{
        arch_perfmon_setup_counters();
        return 0;
}

struct op_x86_model_spec op_arch_perfmon_spec = {
        .reserved               = MSR_PPRO_EVENTSEL_RESERVED,
        .init                   = &arch_perfmon_init,
        /* num_counters/num_controls filled in at runtime */
        .fill_in_addresses      = &ppro_fill_in_addresses,
        /* user space does the cpuid check for available events */
        .setup_ctrs             = &ppro_setup_ctrs,
        .check_ctrs             = &ppro_check_ctrs,
        .start                  = &ppro_start,
        .stop                   = &ppro_stop,
        .shutdown               = &ppro_shutdown
};