Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus

diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index eefdf60da2d796f426b54eb2be9746f90c988c5f..2b7f3f703b83cbeb86f48926517f1e0887f07496 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -6,7 +6,8 @@
  *
  * This code is based on the implementation for ARM, which is in turn
  * based on the sparc64 perf event code and the x86 code. Performance
- * counter access is based on the MIPS Oprofile code.
+ * counter access is based on the MIPS Oprofile code. The callchain
+ * support is modelled on the code in the MIPS stacktrace.c.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -87,6 +88,9 @@ struct mips_perf_event {
 #endif
 };
 
+static struct mips_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
 #define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
 #define C(x) PERF_COUNT_HW_CACHE_##x
 
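
The C(x) macro above exists purely to keep the three-dimensional cache-event map initializers readable, and UNSUPPORTED_PERF_EVENT_ID marks type/op/result combinations the hardware cannot count. A minimal sketch of a map entry built with it (the event number 0x0a, CNTR_EVEN and the brace layout are assumptions modelled on perf_event_mipsxx.c, not part of this diff):

static const struct mips_perf_event example_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		/* Illustrative: event 0x0a, countable on even counters. */
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN },
		/* This op/result pair cannot be counted by the PMU. */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID,
					    CNTR_EVEN },
	},
},
};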
@@ -103,6 +107,7 @@ struct mips_pmu {
        void            (*write_counter)(unsigned int idx, u64 val);
        void            (*enable_event)(struct hw_perf_event *evt, int idx);
        void            (*disable_event)(int idx);
+       const struct mips_perf_event *(*map_raw_event)(u64 config);
        const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
        const struct mips_perf_event (*cache_event_map)
                                [PERF_COUNT_HW_CACHE_MAX]
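
The new map_raw_event hook is what fills in the raw_event object added above: the callback decodes a user-supplied raw config into that one static slot and returns its address, which is why raw_event_mutex has to serialize callers. A hedged sketch of such a callback (the bit layout, CNTR_ODD/CNTR_EVEN and the field names are assumptions based on perf_event_mipsxx.c):

/*
 * Illustrative raw-config decoder. The caller is expected to hold
 * raw_event_mutex, because the returned pointer aliases the shared
 * static raw_event.
 */
static const struct mips_perf_event *example_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;	/* assumed encoding */

	raw_event.event_id = raw_id & 0x7f;
	/* Assumed: the top bit of the raw id selects the odd counters. */
	raw_event.cntr_mask = (raw_id & 0x80) ? CNTR_ODD : CNTR_EVEN;

	return &raw_event;
}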
@@ -408,6 +413,7 @@ static int validate_group(struct perf_event *event)
  * mipsxx/rm9000/loongson2 have different performance counters, so they
  * have specific low-level init routines.
  */
+static void reset_counters(void *arg);
 static int __hw_perf_event_init(struct perf_event *event);
 
 static void hw_perf_event_destroy(struct perf_event *event)
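
The reset_counters() forward declaration is needed because the destroy path below wants to reset the hardware counters on every CPU, while the CPU-specific definition only appears in perf_event_mipsxx.c, which is #included near the end of this file. A hedged sketch of the call-site pattern (the num_counters argument and its cast are assumptions, not taken from this diff):

static void example_reset_all_counters(int num_counters)
{
	/* Run reset_counters() on every online CPU and wait for all of
	 * them to finish; the counter count travels through the void *
	 * argument. */
	on_each_cpu(reset_counters, (void *)(long)num_counters, 1);
}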
@@ -486,3 +492,110 @@ handle_associated_event(struct cpu_hw_events *cpuc,
        if (perf_event_overflow(event, 0, data, regs))
                mipspmu->disable_event(idx);
 }
+
+#include "perf_event_mipsxx.c"
+
+/* Callchain handling code. */
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+               u64 ip)
+{
+       if (entry->nr < PERF_MAX_STACK_DEPTH)
+               entry->ip[entry->nr++] = ip;
+}
+
+/*
+ * Leave the userspace callchain empty for now. Once we find a way to
+ * trace user-stack callchains, the support will be added here.
+ */
+static void
+perf_callchain_user(struct pt_regs *regs,
+                   struct perf_callchain_entry *entry)
+{
+}
+
+static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+       unsigned long reg29)
+{
+       unsigned long *sp = (unsigned long *)reg29;
+       unsigned long addr;
+
+       while (!kstack_end(sp)) {
+               addr = *sp++;
+               if (__kernel_text_address(addr)) {
+                       callchain_store(entry, addr);
+                       if (entry->nr >= PERF_MAX_STACK_DEPTH)
+                               break;
+               }
+       }
+}
+
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+                     struct perf_callchain_entry *entry)
+{
+       unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+       unsigned long ra = regs->regs[31];
+       unsigned long pc = regs->cp0_epc;
+
+       callchain_store(entry, PERF_CONTEXT_KERNEL);
+       if (raw_show_trace || !__kernel_text_address(pc)) {
+               unsigned long stack_page =
+                       (unsigned long)task_stack_page(current);
+               if (stack_page && sp >= stack_page &&
+                   sp <= stack_page + THREAD_SIZE - 32)
+                       save_raw_perf_callchain(entry, sp);
+               return;
+       }
+       do {
+               callchain_store(entry, pc);
+               if (entry->nr >= PERF_MAX_STACK_DEPTH)
+                       break;
+               pc = unwind_stack(current, &sp, pc, &ra);
+       } while (pc);
+#else
+       callchain_store(entry, PERF_CONTEXT_KERNEL);
+       save_raw_perf_callchain(entry, sp);
+#endif
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs,
+                 struct perf_callchain_entry *entry)
+{
+       int is_user;
+
+       if (!regs)
+               return;
+
+       is_user = user_mode(regs);
+
+       if (!current || !current->pid)
+               return;
+
+       if (is_user && current->state != TASK_RUNNING)
+               return;
+
+       if (!is_user) {
+               perf_callchain_kernel(regs, entry);
+               if (current->mm)
+                       regs = task_pt_regs(current);
+               else
+                       regs = NULL;
+       }
+       if (regs)
+               perf_callchain_user(regs, entry);
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+       struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+       entry->nr = 0;
+       perf_do_callchain(regs, entry);
+       return entry;
+}
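
To make the raw fallback in save_raw_perf_callchain() concrete, here is a small user-space analogy of the same heuristic: scan every word on the stack and keep whatever lands in the text range. looks_like_text() and the fake address range stand in for __kernel_text_address(); all names and values below are made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define MAX_DEPTH 8

/* Stand-in for __kernel_text_address(): pretend the kernel text
 * segment occupies [0x80100000, 0x80400000). */
static int looks_like_text(uintptr_t addr)
{
	return addr >= 0x80100000u && addr < 0x80400000u;
}

int main(void)
{
	/* A fake stack: data words mixed with plausible return addresses. */
	uintptr_t stack[] = {
		0x00000004, 0x801234a0, 0xdeadbeef,
		0x8020ff10, 0x00000000, 0x803a0008,
	};
	uintptr_t chain[MAX_DEPTH];
	unsigned int nr = 0;
	size_t i;

	/* Same idea as save_raw_perf_callchain(): record every word that
	 * looks like a text address, up to the depth cap. Data words that
	 * happen to fall in the text range produce bogus frames, which is
	 * why the kernel only does this when it cannot unwind properly. */
	for (i = 0; i < sizeof(stack) / sizeof(stack[0]); i++) {
		if (looks_like_text(stack[i])) {
			chain[nr++] = stack[i];
			if (nr >= MAX_DEPTH)
				break;
		}
	}

	for (i = 0; i < nr; i++)
		printf("frame %zu: 0x%08lx\n", i, (unsigned long)chain[i]);
	return 0;
}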