/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: David Erb (djerb@us.ibm.com)
 * Modifications:
 *	Carl Love <carll@us.ibm.com>
 *	Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/cell-pmu.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/oprofile_impl.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/rtas.h>
#include <asm/system.h>
#include <asm/cell-regs.h>

#include "../platforms/cell/interrupt.h"
#include "cell/pr_util.h"
#define PPU_PROFILING		0
#define SPU_PROFILING_CYCLES	1
#define SPU_PROFILING_EVENTS	2

#define NUM_SPUS_PER_NODE	8
#define SPU_CYCLES_EVENT_NUM	2	/* event number for SPU_CYCLES */

#define PPU_CYCLES_EVENT_NUM	1	/* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM	1	/* special group number for identifying
					 * the PPU_CYCLES event
					 */
#define CBE_COUNT_ALL_CYCLES	0x42800000 /* PPU cycle event specifier */

#define NUM_THREADS 2		/* number of physical threads in a
				 * physical processor
				 */
#define NUM_DEBUG_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2

#define MAX_SPU_COUNT 0xFFFFFF	/* maximum 24 bit LFSR value */
/*
 * spu_cycle_reset is the number of cycles between samples.
 * This variable is used for SPU profiling and should ONLY be set
 * at the beginning of cell_reg_setup; otherwise, it's read-only.
 */
static unsigned int spu_cycle_reset;
static unsigned int profiling_mode;
struct pmc_cntrl_data {
	unsigned long vcntr;
	unsigned long evnts;
	unsigned long masks;
	unsigned long enabled;
};

/*
 * ibm,cbe-perftools rtas parameters
 */
struct pm_signal {
	u16 cpu;		/* Processor to modify */
	u16 sub_unit;		/* hw subunit this applies to (if applicable) */
	short int signal_group;	/* Signal Group to Enable/Disable */
	u8 bus_word;		/* Enable/Disable on this Trace/Trigger/Event
				 * Bus Word(s) (bitmask)
				 */
	u8 bit;			/* Trigger/Event bit (if applicable) */
};
enum {
	SUBFUNC_RESET = 1,
	SUBFUNC_ACTIVATE = 2,
	SUBFUNC_DEACTIVATE = 3,

	PASSTHRU_IGNORE = 0,
	PASSTHRU_ENABLE = 1,
	PASSTHRU_DISABLE = 2,
};

struct pm_cntrl {
	u16 enable;
	u16 stop_at_max;
	u16 trace_mode;
	u16 freeze;
	u16 count_mode;
};

static struct {
	u32 group_control;
	u32 debug_bus_control;
	struct pm_cntrl pm_cntrl;
	u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs;
#define GET_SUB_UNIT(x)		((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x)		((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x)		((x & 0x00000300) >> 8)
#define GET_POLARITY(x)		((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x)	(x & 0x00000001)
#define GET_INPUT_CONTROL(x)	((x & 0x00000004) >> 2)
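
/*
 * Worked decode of the macros above for an illustrative (hypothetical)
 * unit_mask of 0x00001113:
 *	GET_SUB_UNIT(0x1113)      = 1
 *	GET_BUS_WORD(0x1113)      = 0x1
 *	GET_BUS_TYPE(0x1113)      = 1
 *	GET_POLARITY(0x1113)      = 1
 *	GET_COUNT_CYCLES(0x1113)  = 1
 *	GET_INPUT_CONTROL(0x1113) = 0
 */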

static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);

static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
/*
 * The CELL profiling code makes rtas calls to set up the debug bus to
 * route the performance signals.  Additionally, SPU profiling requires
 * a second rtas call to set up the hardware to capture the SPU PCs.
 * The EIO error value is returned if the token lookups or the rtas
 * call fail.  The EIO error number is the best choice of the existing
 * error numbers.  The probability of an rtas-related error is very low.
 * But by returning EIO and printing additional information to dmesg,
 * the user will know that OProfile did not start and dmesg will tell
 * them why.  OProfile does not support returning errors on Stop.  Not a
 * huge issue, since failure to reset the debug bus or stop the SPU PC
 * collection is not a fatal issue.  Chances are if the Stop failed,
 * Start doesn't work either.
 */

/*
 * Interpretation of hdw_thread:
 * 0 - even virtual cpus 0, 2, 4, ...
 * 1 - odd virtual cpus 1, 3, 5, ...
 *
 * FIXME: this is strictly wrong, we need to clean this up in a number
 * of places. It works for now. -arnd
 */
static u32 hdw_thread;
static u32 virt_cntr_inter_mask;
static struct timer_list timer_virt_cntr;

/*
 * pm_signal needs to be global since it is initialized in
 * cell_reg_setup at the time when the necessary information
 * is available.
 */
static struct pm_signal pm_signal[NR_PHYS_CTRS];
static int pm_rtas_token;	/* token for debug bus setup call */
static int spu_rtas_token;	/* token for SPU cycle profiling */

static u32 reset_value[NR_PHYS_CTRS];
static int num_counters;
static int oprofile_running;
static DEFINE_SPINLOCK(cntr_lock);

static u32 ctr_enabled;

static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
/*
 * Firmware interface functions
 */
static int
rtas_ibm_cbe_perftools(int subfunc, int passthru,
		       void *address, unsigned long length)
{
	u64 paddr = __pa(address);

	return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
			 passthru, paddr >> 32, paddr & 0xffffffff, length);
}
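
/*
 * Example (illustrative): for paddr = 0x0000000123456789 the firmware
 * receives the high word 0x1 and the low word 0x23456789 as two
 * separate 32-bit rtas arguments.
 */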

static void pm_rtas_reset_signals(u32 node)
{
	int ret;
	struct pm_signal pm_signal_local;

	/*
	 * The debug bus is being set to the passthru disable state.
	 * However, the FW still expects at least one legal signal routing
	 * entry or it will return an error on the arguments.  If we don't
	 * supply a valid entry, we must ignore all return values.  Ignoring
	 * all return values means we might miss an error we should be
	 * concerned about.
	 */

	/* fw expects physical cpu #. */
	pm_signal_local.cpu = node;
	pm_signal_local.signal_group = 21;
	pm_signal_local.bus_word = 1;
	pm_signal_local.sub_unit = 0;
	pm_signal_local.bit = 0;

	ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
				     &pm_signal_local,
				     sizeof(struct pm_signal));

	if (unlikely(ret))
		/*
		 * Not a fatal error.  For OProfile stop, the OProfile
		 * functions do not support returning an error for
		 * failure to stop OProfile.
		 */
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __func__, ret);
}
static int pm_rtas_activate_signals(u32 node, u32 count)
{
	int ret, i, j;
	struct pm_signal pm_signal_local[NR_PHYS_CTRS];

	/*
	 * There is no debug setup required for the cycles event.
	 * Note that only events in the same group can be used.
	 * Otherwise, there will be conflicts in correctly routing
	 * the signals on the debug bus.  It is the responsibility
	 * of the OProfile user tool to check that the events are in
	 * the same group.
	 */
	i = 0;
	for (j = 0; j < count; j++) {
		if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {
			/* fw expects physical cpu # */
			pm_signal_local[i].cpu = node;
			pm_signal_local[i].signal_group
				= pm_signal[j].signal_group;
			pm_signal_local[i].bus_word = pm_signal[j].bus_word;
			pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
			pm_signal_local[i].bit = pm_signal[j].bit;
			i++;
		}
	}

	if (i != 0) {
		ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
					     pm_signal_local,
					     i * sizeof(struct pm_signal));
		if (unlikely(ret)) {
			printk(KERN_WARNING "%s: rtas returned: %d\n",
			       __func__, ret);
			return -EIO;
		}
	}

	return 0;
}
/*
 * PM Signal functions
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
	struct pm_signal *p;
	u32 signal_bit;
	u32 bus_word, bus_type, count_cycles, polarity, input_control;
	int j, i;

	if (event == PPU_CYCLES_EVENT_NUM) {
		/* Special Event: Count all cpu cycles */
		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
		p = &(pm_signal[ctr]);
		p->signal_group = PPU_CYCLES_GRP_NUM;
		p->bus_word = 1;
		p->sub_unit = 0;
		p->bit = 0;
		goto out;
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
	}

	bus_word = GET_BUS_WORD(unit_mask);
	bus_type = GET_BUS_TYPE(unit_mask);
	count_cycles = GET_COUNT_CYCLES(unit_mask);
	polarity = GET_POLARITY(unit_mask);
	input_control = GET_INPUT_CONTROL(unit_mask);
	signal_bit = (event % 100);

	p = &(pm_signal[ctr]);

	p->signal_group = event / 100;
	p->bus_word = bus_word;
	p->sub_unit = GET_SUB_UNIT(unit_mask);

	pm_regs.pm07_cntrl[ctr] = 0;
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

	/*
	 * Some of the islands signal selection is based on 64 bit words.
	 * The debug bus words are 32 bits, the input words to the performance
	 * counters are defined as 32 bits.  Need to convert the 64 bit island
	 * specification to the appropriate 32 input bit and bus word for the
	 * performance counter event selection.  See the CELL Performance
	 * monitoring signals manual and the Perf cntr hardware descriptions
	 * for the details.
	 */
	if (input_control == 0) {
		if (signal_bit > 31) {
			signal_bit -= 32;
			if (bus_word == 0x3)
				bus_word = 0xc;
			else if (bus_word == 0xc)
				bus_word = 0x3;
		}

		if ((bus_type == 0) && p->signal_group >= 60)
			bus_type = 2;
		if ((bus_type == 1) && p->signal_group >= 50)
			bus_type = 0;

		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
		p->bit = signal_bit;
	}

	for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
		if (bus_word & (1 << i)) {
			pm_regs.debug_bus_control |=
				(bus_type << (30 - (2 * i)));

			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
				if (input_bus[j] == 0xff) {
					input_bus[j] = i;
					pm_regs.group_control |=
						(i << (30 - (2 * j)));
					break;
				}
			}
		}
	}
out:
	;
}
static void write_pm_cntrl(int cpu)
{
	/*
	 * Oprofile will use 32 bit counters, set bits 7:10 to 0.
	 * pm_regs.pm_cntrl is a global.
	 */
	u32 val = 0;

	if (pm_regs.pm_cntrl.enable == 1)
		val |= CBE_PM_ENABLE_PERF_MON;

	if (pm_regs.pm_cntrl.stop_at_max == 1)
		val |= CBE_PM_STOP_AT_MAX;

	if (pm_regs.pm_cntrl.trace_mode != 0)
		val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

	if (pm_regs.pm_cntrl.freeze == 1)
		val |= CBE_PM_FREEZE_ALL_CTRS;

	/*
	 * Routine set_count_mode must be called previously to set
	 * the count mode based on the user selection of user and kernel.
	 */
	val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
	cbe_write_pm(cpu, pm_control, val);
}
static inline void
set_count_mode(u32 kernel, u32 user)
{
	/*
	 * The user must specify user and kernel if they want them. If
	 * neither is specified, OProfile will count in hypervisor mode.
	 * pm_regs.pm_cntrl is a global.
	 */
	if (kernel) {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_SUPERVISOR_MODE;
	} else {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_HYPERVISOR_MODE;
	}
}
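
/*
 * Summary of the mapping above, derived directly from the code:
 *	kernel=1, user=1  ->  CBE_COUNT_ALL_MODES
 *	kernel=1, user=0  ->  CBE_COUNT_SUPERVISOR_MODE
 *	kernel=0, user=1  ->  CBE_COUNT_PROBLEM_MODE
 *	kernel=0, user=0  ->  CBE_COUNT_HYPERVISOR_MODE
 */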

static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
{
	pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
	cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
}
/*
 * Oprofile is expected to collect data on all CPUs simultaneously.
 * However, there is one set of performance counters per node.  There are
 * two hardware threads or virtual CPUs on each node.  Hence, OProfile must
 * multiplex in time the performance counter collection on the two virtual
 * CPUs.  The multiplexing of the performance counters is done by this
 * virtual counter routine.
 *
 * The pmc_values used below is defined as 'per-cpu' but its use is
 * more akin to 'per-node'.  We need to store two sets of counter
 * values per node -- one for the previous run and one for the next.
 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need.  Each odd/even
 * pair of per-cpu arrays is used for storing the previous and next
 * pmc values for a given node.
 * NOTE: We use the per-cpu variable to improve cache performance.
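 *
 * For example (illustrative), on the node holding virtual cpus 0 and 1,
 * per_cpu(pmc_values, 0) stores the counts for hardware thread 0 while
 * per_cpu(pmc_values, 1) stores the counts for hardware thread 1; the
 * cpu + prev/next_hdw_thread indexing below selects between the pair.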
 *
 * This routine will alternate loading the virtual counters for
 * the two virtual CPUs.
 */
static void cell_virtual_cntr(unsigned long data)
{
	int i, prev_hdw_thread, next_hdw_thread;
	u32 cpu;
	unsigned long flags;

	/*
	 * Make sure that the interrupt handler and the virt counter are
	 * not both playing with the counters on the same node.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	prev_hdw_thread = hdw_thread;

	/* switch the cpu handling the interrupts */
	hdw_thread = 1 ^ hdw_thread;
	next_hdw_thread = hdw_thread;

	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * There are some per thread events.  Must do the
	 * set event for the thread that is being started.
	 */
	for (i = 0; i < num_counters; i++)
		set_pm_event(i,
			     pmc_cntrl[next_hdw_thread][i].evnts,
			     pmc_cntrl[next_hdw_thread][i].masks);

	/*
	 * The following is done only once per each node, but
	 * we need cpu #, not node #, to pass to the cbe_xxx functions.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * stop counters, save counter values, restore counts
		 * for previous thread
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);
		for (i = 0; i < num_counters; i++) {
			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
				= cbe_read_ctr(cpu, i);

			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
			    == 0xFFFFFFFF)
				/* If the cntr value is 0xffffffff, we must
				 * reset that to 0xfffffff0 when the current
				 * thread is restarted.  This will generate a
				 * new interrupt and make sure that we never
				 * restore the counters to the max value.  If
				 * the counters were restored to the max value,
				 * they do not increment and no interrupts are
				 * generated.  Hence no more samples will be
				 * collected on that cpu.
				 */
				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
			else
				cbe_write_ctr(cpu, i,
					      per_cpu(pmc_values,
						      cpu +
						      next_hdw_thread)[i]);
		}

		/*
		 * Switch to the other thread.  Change the interrupt
		 * and control regs to be scheduled on the CPU
		 * corresponding to the thread to execute.
		 */
		for (i = 0; i < num_counters; i++) {
			if (pmc_cntrl[next_hdw_thread][i].enabled) {
				/*
				 * There are some per thread events.
				 * Must do the set event, enable_cntr
				 * for each cpu.
				 */
				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
			} else {
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
					 virt_cntr_inter_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&cntr_lock, flags);

	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}
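
/*
 * Timer setup for the rotation above: the timer fires every HZ/10
 * jiffies (about every 100 ms), and each expiry of cell_virtual_cntr()
 * swaps which hardware thread owns the physical counters.
 */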
static void start_virt_cntrs(void)
{
	init_timer(&timer_virt_cntr);
	timer_virt_cntr.function = cell_virtual_cntr;
	timer_virt_cntr.data = 0UL;
	timer_virt_cntr.expires = jiffies + HZ / 10;
	add_timer(&timer_virt_cntr);
}

static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
				     struct op_system_config *sys, int num_ctrs)
{
	spu_cycle_reset = ctr[0].count;

	/*
	 * Each node will need to make the rtas call to start
	 * and stop SPU profiling.  Get the token once and store it.
	 */
	spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");

	if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
		       __func__);
		return -EIO;
	}

	return 0;
}
static int cell_reg_setup_ppu(struct op_counter_config *ctr,
			      struct op_system_config *sys, int num_ctrs)
{
	int i, j, cpu;

	num_counters = num_ctrs;

	if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
		printk(KERN_ERR
		       "%s: OProfile, number of specified events " \
		       "exceeds number of physical counters\n",
		       __func__);
		return -EIO;
	}

	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	/* setup the pm_control register */
	memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
	pm_regs.pm_cntrl.stop_at_max = 1;
	pm_regs.pm_cntrl.trace_mode = 0;
	pm_regs.pm_cntrl.freeze = 1;

	set_count_mode(sys->enable_kernel, sys->enable_user);

	/* Setup the thread 0 events */
	for (i = 0; i < num_ctrs; ++i) {
		pmc_cntrl[0][i].evnts = ctr[i].event;
		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
		pmc_cntrl[0][i].enabled = ctr[i].enabled;
		pmc_cntrl[0][i].vcntr = i;

		for_each_possible_cpu(j)
			per_cpu(pmc_values, j)[i] = 0;
	}

	/*
	 * Setup the thread 1 events, map the thread 0 event to the
	 * equivalent thread 1 event.
	 */
	for (i = 0; i < num_ctrs; ++i) {
		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
		else if (ctr[i].event == 2203)
			pmc_cntrl[1][i].evnts = ctr[i].event;
		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
		else
			pmc_cntrl[1][i].evnts = ctr[i].event;

		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
		pmc_cntrl[1][i].enabled = ctr[i].enabled;
		pmc_cntrl[1][i].vcntr = i;
	}
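
	/*
	 * Mapping examples, following directly from the ranges above:
	 * thread 0 event 2100 maps to thread 1 event 2119, event 2203
	 * is left unchanged, and event 2205 maps to event 2221.
	 */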

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * Our counters count up, and "count" refers to
	 * how much before the next interrupt, and we interrupt
	 * on overflow.  So we calculate the starting value
	 * which will give us "count" until overflow.
	 * Then we set the events on the enabled counters.
	 */
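	/*
	 * Worked example (illustrative): a user-requested count of 100000
	 * yields reset_value = 0xFFFFFFFF - 100000 = 0xFFFE795F, so the
	 * counter delivers the requested 100000 events until overflow.
	 */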
	for (i = 0; i < num_counters; ++i) {
		/* start with virtual counter set 0 */
		if (pmc_cntrl[0][i].enabled) {
			/* Using 32bit counters, reset max - count */
			reset_value[i] = 0xFFFFFFFF - ctr[i].count;
			set_pm_event(i,
				     pmc_cntrl[0][i].evnts,
				     pmc_cntrl[0][i].masks);

			/* global, used by cell_cpu_setup */
			ctr_enabled |= (1 << i);
		}
	}

	/* initialize the previous counts for the virtual cntrs */
	for_each_online_cpu(cpu)
		for (i = 0; i < num_counters; ++i)
			per_cpu(pmc_values, cpu)[i] = reset_value[i];

	return 0;
}

/* This function is called once for all cpus combined */
static int cell_reg_setup(struct op_counter_config *ctr,
			  struct op_system_config *sys, int num_ctrs)
{
	int ret;

	spu_cycle_reset = 0;

	/*
	 * For all events except PPU CYCLES, each node will need to make
	 * the rtas cbe-perftools call to setup and reset the debug bus.
	 * Make the token lookup call once and store it in the global
	 * variable pm_rtas_token.
	 */
	pm_rtas_token = rtas_token("ibm,cbe-perftools");

	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-perftools unknown\n",
		       __func__);
		return -EIO;
	}

	if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
		profiling_mode = SPU_PROFILING_CYCLES;
		ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
	} else {
		profiling_mode = PPU_PROFILING;
		ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
	}

	return ret;
}

/* This function is called once for each cpu */
static int cell_cpu_setup(struct op_counter_config *cntr)
{
	u32 cpu = smp_processor_id();
	u32 num_enabled = 0;
	int i;

	/* Cycle based SPU profiling does not use the performance
	 * counters.  The trace array is configured to collect
	 * the data.
	 */
	if (profiling_mode == SPU_PROFILING_CYCLES)
		return 0;

	/* There is one performance monitor per processor chip (i.e. node),
	 * so we only need to perform this function once per node.
	 */
	if (cbe_get_hw_thread_id(cpu))
		return 0;

	/* Stop all counters */
	cbe_disable_pm(cpu);
	cbe_disable_pm_interrupts(cpu);

	cbe_write_pm(cpu, pm_start_stop, 0);
	cbe_write_pm(cpu, group_control, pm_regs.group_control);
	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
	write_pm_cntrl(cpu);

	for (i = 0; i < num_counters; ++i) {
		if (ctr_enabled & (1 << i)) {
			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
			num_enabled++;
		}
	}

	/*
	 * The pm_rtas_activate_signals will return -EIO if the FW
	 * call failed.
	 */
	return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
}
#define ENTRIES	303	/* number of entries in the LFSR lookup table */
#define MAXLFSR	0xFFFFFF

/* precomputed table of 24 bit LFSR values */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
};

/*
 * The hardware uses an LFSR counting sequence to determine when to capture
 * the SPU PCs.  An LFSR sequence is like a pseudo random number sequence
 * where each number occurs once in the sequence but the sequence is not in
 * numerical order.  The SPU PC capture is done when the LFSR sequence reaches
 * the last value in the sequence.  Hence the user specified value N
 * corresponds to the LFSR number that is N from the end of the sequence.
 *
 * To avoid the time to compute the LFSR, a lookup table is used.  The 24 bit
 * LFSR sequence is broken into four ranges.  The spacing of the precomputed
 * values is adjusted in each range so the error between the user specified
 * number (N) of events between samples and the actual number of events based
 * on the precomputed value will be less than about 6.2%.  Note, if the user
 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
 * This is to prevent the loss of samples because the trace buffer is full.
 *
 *	User specified N		 Step between		Index in
 *					 precomputed values	precomputed
 *								table
 * 0		   to 2^16-1			----		     0
 * 2^16		   to 2^16+2^19-1		2^12		  1 to 128
 * 2^16+2^19	   to 2^16+2^19+2^22-1		2^15		129 to 256
 * 2^16+2^19+2^22  to 2^24-1			2^18		257 to 302
 *
 * For example, the LFSR values in the second range are computed for 2^16,
 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
 * 1, 2, ..., 127, 128.
 *
 * The 24 bit LFSR value for the nth number in the sequence can be
 * calculated using the following code:
 *
 * #define size 24
 * int calculate_lfsr(int n)
 * {
 *	int i;
 *	unsigned int newlfsr0;
 *	unsigned int lfsr = 0xFFFFFF;
 *	unsigned int howmany = n;
 *
 *	for (i = 2; i < howmany + 2; i++) {
 *		newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
 *			    ((lfsr >> (size - 1 - 1)) & 1) ^
 *			    (((lfsr >> (size - 1 - 6)) & 1) ^
 *			     ((lfsr >> (size - 1 - 23)) & 1)));
 *
 *		lfsr >>= 1;
 *		lfsr = lfsr | (newlfsr0 << (size - 1));
 *	}
 *	return lfsr;
 * }
 */

#define V2_16	(0x1 << 16)
#define V2_19	(0x1 << 19)
#define V2_22	(0x1 << 22)
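
/*
 * Index selection example for calculate_lfsr() below (illustrative):
 * for n = 200000, (n >> 16) != 0 and ((n - V2_16) >> 19) == 0, so
 * index = ((200000 - 65536) >> 12) + 1 = 32 + 1 = 33.
 */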
static int calculate_lfsr(int n)
{
	/*
	 * The ranges and steps are in powers of 2 so the calculations
	 * can be done using shifts rather than divides.
	 */
	int index;

	if ((n >> 16) == 0)
		index = 0;
	else if (((n - V2_16) >> 19) == 0)
		index = ((n - V2_16) >> 12) + 1;
	else if (((n - V2_16 - V2_19) >> 22) == 0)
		index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
	else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
		index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
	else
		index = ENTRIES - 1;

	/* make sure index is valid */
	if ((index >= ENTRIES) || (index < 0))
		index = ENTRIES - 1;

	return initial_lfsr[index];
}

static int pm_rtas_activate_spu_profiling(u32 node)
{
	int ret, i;
	struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];

	/*
	 * Set up the rtas call to configure the debug bus to
	 * route the SPU PCs.  Setup the pm_signal for each SPU.
	 */
	for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
		pm_signal_local[i].cpu = node;
		pm_signal_local[i].signal_group = 41;
		/* spu i on word (i/2) */
		pm_signal_local[i].bus_word = 1 << (i / 2);
		pm_signal_local[i].sub_unit = i;
		pm_signal_local[i].bit = 63;
	}
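
	/*
	 * Resulting routing (illustrative): SPUs 0 and 1 share debug bus
	 * word 0x1, SPUs 2 and 3 word 0x2, SPUs 4 and 5 word 0x4, and
	 * SPUs 6 and 7 word 0x8, each distinguished by its sub_unit.
	 */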

	ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
				     PASSTHRU_ENABLE, pm_signal_local,
				     (ARRAY_SIZE(pm_signal_local)
				      * sizeof(struct pm_signal)));

	if (unlikely(ret)) {
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __func__, ret);
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_CPU_FREQ
static int
oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	int ret = 0;
	struct cpufreq_freqs *frq = data;

	if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
	    (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
		set_spu_profiling_frequency(frq->new, spu_cycle_reset);
	return ret;
}

static struct notifier_block cpu_freq_notifier_block = {
	.notifier_call = oprof_cpufreq_notify
};
#endif

/*
 * Note the generic OProfile stop calls do not support returning
 * an error on stop.  Hence, we will not return an error if the FW
 * calls fail on stop.  Failure to reset the debug bus is not an issue.
 * Failure to disable the SPU profiling is not an issue.  The FW calls
 * to enable the performance counters and debug bus will work even if
 * the hardware was not cleanly reset.
 */
static void cell_global_stop_spu_cycles(void)
{
	int subfunc, rtn_value;
	unsigned int lfsr_value;
	int cpu;

	oprofile_running = 0;
	smp_wmb();

#ifdef CONFIG_CPU_FREQ
	cpufreq_unregister_notifier(&cpu_freq_notifier_block,
				    CPUFREQ_TRANSITION_NOTIFIER);
#endif

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		subfunc = 3;	/*
				 * 2 - activate SPU tracing,
				 * 3 - deactivate
				 */
		lfsr_value = 0x8f100000;

		rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
				      subfunc, cbe_cpu_to_node(cpu),
				      lfsr_value);

		if (unlikely(rtn_value != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools " \
			       "failed, return = %d\n",
			       __func__, rtn_value);
		}

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
	}

	if (profiling_mode == SPU_PROFILING_CYCLES)
		stop_spu_profiling_cycles();
}

static void cell_global_stop_ppu(void)
{
	int cpu;

	/*
	 * This routine will be called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	del_timer_sync(&timer_virt_cntr);
	oprofile_running = 0;
	smp_wmb();

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		cbe_sync_irq(cbe_cpu_to_node(cpu));
		/* Stop the counters */
		cbe_disable_pm(cpu);

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* Deactivate interrupts */
		cbe_disable_pm_interrupts(cpu);
	}
}

static void cell_global_stop(void)
{
	if (profiling_mode == PPU_PROFILING)
		cell_global_stop_ppu();
	else
		cell_global_stop_spu_cycles();
}

static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
{
	int subfunc;
	unsigned int lfsr_value;
	int cpu;
	int ret;
	int rtas_error;
	unsigned int cpu_khzfreq = 0;

	/* The SPU profiling uses time-based profiling based on
	 * cpu frequency, so if configured with the CPU_FREQ
	 * option, we should detect frequency changes and react
	 * accordingly.
	 */
#ifdef CONFIG_CPU_FREQ
	ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret < 0)
		/* this is not a fatal error */
		printk(KERN_ERR "CPU freq change registration failed: %d\n",
		       ret);
	else
		cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
#endif

	set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU cycle-based profiling.
		 * Set perf_mon_control bit 0 to a zero before
		 * enabling spu collection hardware.
		 */
		cbe_write_pm(cpu, pm_control, 0);

		if (spu_cycle_reset > MAX_SPU_COUNT)
			/* use largest possible value */
			lfsr_value = calculate_lfsr(MAX_SPU_COUNT - 1);
		else
			lfsr_value = calculate_lfsr(spu_cycle_reset);

		/* must use a non zero value. Zero disables data collection. */
		if (lfsr_value == 0)
			lfsr_value = calculate_lfsr(1);

		lfsr_value = lfsr_value << 8; /* shift lfsr to correct
					       * register location
					       */

		/* debug bus setup */
		ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));

		if (unlikely(ret)) {
			rtas_error = ret;
			goto out;
		}

		subfunc = 2;	/* 2 - activate SPU tracing, 3 - deactivate */

		/* start profiling */
		ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
				cbe_cpu_to_node(cpu), lfsr_value);

		if (unlikely(ret != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools failed, " \
			       "return = %d\n", __func__, ret);
			rtas_error = -EIO;
			goto out;
		}
	}

	rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
	if (rtas_error)
		goto out_stop;

	oprofile_running = 1;
	return 0;

out_stop:
	cell_global_stop_spu_cycles();	/* clean up the PMU/debug bus */
out:
	return rtas_error;
}

static int cell_global_start_ppu(struct op_counter_config *ctr)
{
	u32 cpu, i;
	u32 interrupt_mask = 0;

	/* This routine gets called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		interrupt_mask = 0;

		for (i = 0; i < num_counters; ++i) {
			if (ctr_enabled & (1 << i)) {
				cbe_write_ctr(cpu, i, reset_value[i]);
				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
				interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
			} else {
				/* Disable counter */
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);
	}

	virt_cntr_inter_mask = interrupt_mask;
	oprofile_running = 1;
	smp_wmb();

	/*
	 * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
	 * executed which manipulates the PMU.  We start the "virtual counter"
	 * here so that we do not need to synchronize access to the PMU in
	 * the above for-loop.
	 */
	start_virt_cntrs();

	return 0;
}

static int cell_global_start(struct op_counter_config *ctr)
{
	if (profiling_mode == SPU_PROFILING_CYCLES)
		return cell_global_start_spu_cycles(ctr);
	else
		return cell_global_start_ppu(ctr);
}

static void cell_handle_interrupt_ppu(struct pt_regs *regs,
				      struct op_counter_config *ctr)
{
	u32 cpu;
	u64 pc;
	int is_kernel;
	unsigned long flags = 0;
	u32 interrupt_mask;
	int i;

	cpu = smp_processor_id();

	/*
	 * Need to make sure the interrupt handler and the virt counter
	 * routine are not running at the same time. See the
	 * cell_virtual_cntr() routine for additional comments.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	/*
	 * Need to disable and reenable the performance counters
	 * to get the desired behavior from the hardware.  This
	 * is hardware specific.
	 */
	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	/*
	 * If the interrupt mask has been cleared, then the virt cntr
	 * has cleared the interrupt.  When the thread that generated
	 * the interrupt is restored, the data count will be restored to
	 * 0xfffffff0 to cause the interrupt to be regenerated.
	 */
	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		pc = regs->nip;
		is_kernel = is_kernel_addr(pc);

		for (i = 0; i < num_counters; ++i) {
			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
			    && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				cbe_write_ctr(cpu, i, reset_value[i]);
			}
		}

		/*
		 * The counters were frozen by the interrupt.
		 * Reenable the interrupt and restart the counters.
		 * If there was a race between the interrupt handler and
		 * the virtual counter routine, the virtual counter
		 * routine may have cleared the interrupts.  Hence we must
		 * use the virt_cntr_inter_mask to re-enable the interrupts.
		 */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/*
		 * The writes to the various performance counters only write
		 * to a latch.  The new values (interrupt setting bits, reset
		 * counter value etc.) are not copied to the actual registers
		 * until the performance monitor is enabled.  In order to get
		 * this to work as desired, the performance monitor needs to
		 * be disabled while writing to the latches.  This is a
		 * HW design issue.
		 */
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&cntr_lock, flags);
}

static void cell_handle_interrupt(struct pt_regs *regs,
				  struct op_counter_config *ctr)
{
	if (profiling_mode == PPU_PROFILING)
		cell_handle_interrupt_ppu(regs, ctr);
}

/*
 * This function is called from the generic OProfile
 * driver.  When profiling PPUs, we need to do the
 * generic sync start; otherwise, do spu_sync_start.
 */
static int cell_sync_start(void)
{
	if ((profiling_mode == SPU_PROFILING_CYCLES) ||
	    (profiling_mode == SPU_PROFILING_EVENTS))
		return spu_sync_start();
	else
		return DO_GENERIC_SYNC;
}

static int cell_sync_stop(void)
{
	if ((profiling_mode == SPU_PROFILING_CYCLES) ||
	    (profiling_mode == SPU_PROFILING_EVENTS))
		return spu_sync_stop();
	else
		return 1;
}

struct op_powerpc_model op_model_cell = {
	.reg_setup = cell_reg_setup,
	.cpu_setup = cell_cpu_setup,
	.global_start = cell_global_start,
	.global_stop = cell_global_stop,
	.sync_start = cell_sync_start,
	.sync_stop = cell_sync_stop,
	.handle_interrupt = cell_handle_interrupt,
};