]> git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge branches 'tracing/ftrace', 'tracing/markers', 'tracing/mmiotrace', 'tracing...
authorIngo Molnar <mingo@elte.hu>
Mon, 3 Nov 2008 09:34:23 +0000 (10:34 +0100)
committerIngo Molnar <mingo@elte.hu>
Mon, 3 Nov 2008 09:34:23 +0000 (10:34 +0100)
19 files changed:
Documentation/kernel-parameters.txt
arch/arm/include/asm/ftrace.h
arch/powerpc/include/asm/ftrace.h
arch/sh/include/asm/ftrace.h
arch/sparc/include/asm/ftrace.h
arch/x86/Kconfig.debug
arch/x86/include/asm/ftrace.h
arch/x86/kernel/ftrace.c
arch/x86/mm/Makefile
arch/x86/mm/fault.c
include/linux/ftrace.h
include/linux/hardirq.h
include/linux/marker.h
kernel/marker.c
kernel/sysctl.c
kernel/trace/Kconfig
kernel/trace/trace.c
scripts/Makefile.build
scripts/tracing/draw_functrace.py [new file with mode: 0644]

index 1bbcaa8982b6c2445fcfdd852f9e33702129cd8b..4862284d31191841fcde8c2b6aa803c43b741117 100644 (file)
@@ -765,6 +765,14 @@ and is between 256 and 4096 characters. It is defined in the file
                        parameter will force ia64_sal_cache_flush to call
                        ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
 
+       ftrace=[tracer]
+                       [ftrace] will set and start the specified tracer
+                       as early as possible in order to facilitate early
+                       boot debugging.
+
+       ftrace_dump_on_oops
+                       [ftrace] will dump the trace buffers on oops.
+
        gamecon.map[2|3]=
                        [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
                        support via parallel port (up to 5 devices per port)
index 39c8bc1a006afbfbca896948ae86c4268e7ed52e..3f3a1d1508eaff5b326e2ca6045675eb010474f4 100644 (file)
@@ -1,6 +1,11 @@
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE
 
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR            ((long)(mcount))
 #define MCOUNT_INSN_SIZE       4 /* sizeof mcount call */
index b298f7a631e6cd9620094c72da4c82832afcdb43..1cd72700fbc046077b7f29875c6bd191550af92d 100644 (file)
@@ -1,6 +1,11 @@
 #ifndef _ASM_POWERPC_FTRACE
 #define _ASM_POWERPC_FTRACE
 
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR            ((long)(_mcount))
 #define MCOUNT_INSN_SIZE       4 /* sizeof mcount call */
index 3aed362c9463a6ccb492f6ef20ae4bb14ce27a75..31ada0370cb602283850e94951491663115d8ab6 100644 (file)
@@ -1,6 +1,11 @@
 #ifndef __ASM_SH_FTRACE_H
 #define __ASM_SH_FTRACE_H
 
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 #endif
index d27716cd38c1ac0243a74104f3106850d24397b1..62055ac0496e99af74ef0c5586ed5f5810a521d3 100644 (file)
@@ -1,6 +1,11 @@
 #ifndef _ASM_SPARC64_FTRACE
 #define _ASM_SPARC64_FTRACE
 
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
 #ifdef CONFIG_MCOUNT
 #define MCOUNT_ADDR            ((long)(_mcount))
 #define MCOUNT_INSN_SIZE       4 /* sizeof mcount call */
index 2a3dfbd5e677b548e5e75f43da69e934b90a7eff..fa013f529b746564fd04695bd4a12fe65c401dca 100644 (file)
@@ -186,14 +186,10 @@ config IOMMU_LEAK
          Add a simple leak tracer to the IOMMU code. This is useful when you
          are debugging a buggy device driver that leaks IOMMU mappings.
 
-config MMIOTRACE_HOOKS
-       bool
-
 config MMIOTRACE
        bool "Memory mapped IO tracing"
        depends on DEBUG_KERNEL && PCI
        select TRACING
-       select MMIOTRACE_HOOKS
        help
          Mmiotrace traces Memory Mapped I/O access and is meant for
          debugging and reverse engineering. It is called from the ioremap
index 9e8bc29b8b17dd3739d7479af6920c3c627130cb..a23468194b8c346f2fa463d6ec57d9f05938d20f 100644 (file)
@@ -17,6 +17,21 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
         */
        return addr - 1;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_nmi_enter(void);
+extern void ftrace_nmi_exit(void);
+#else
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+#endif /* __ASSEMBLY__ */
+
+#else /* CONFIG_FUNCTION_TRACER */
+
+#ifndef __ASSEMBLY__
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
 #endif
 
 #endif /* CONFIG_FUNCTION_TRACER */
index 50ea0ac8c9bf2c27a53323b93b5d473bd7e1d028..69149337f2fe68ab47ac879a578c306dc5ded5d4 100644 (file)
@@ -56,6 +56,133 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
        return calc.code;
 }
 
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Set a flag that says we are modifying code
+ * 3) Wait for any running NMIs to finish.
+ * 4) Write the code
+ * 5) clear the flag.
+ * 6) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+
+static atomic_t in_nmi = ATOMIC_INIT(0);
+static int mod_code_status;            /* holds return value of text write */
+static int mod_code_write;             /* set when NMI should do the write */
+static void *mod_code_ip;              /* holds the IP to write to */
+static void *mod_code_newcode;         /* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+       int r;
+
+       r = snprintf(buf, size, "%u %u",
+                    nmi_wait_count,
+                    atomic_read(&nmi_update_count));
+       return r;
+}
+
+static void ftrace_mod_code(void)
+{
+       /*
+        * Yes, more than one CPU process can be writing to mod_code_status.
+        *    (and the code itself)
+        * But if one were to fail, then they all should, and if one were
+        * to succeed, then they all should.
+        */
+       mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+                                            MCOUNT_INSN_SIZE);
+
+}
+
+void ftrace_nmi_enter(void)
+{
+       atomic_inc(&in_nmi);
+       /* Must have in_nmi seen before reading write flag */
+       smp_mb();
+       if (mod_code_write) {
+               ftrace_mod_code();
+               atomic_inc(&nmi_update_count);
+       }
+}
+
+void ftrace_nmi_exit(void)
+{
+       /* Finish all executions before clearing in_nmi */
+       smp_wmb();
+       atomic_dec(&in_nmi);
+}
+
+static void wait_for_nmi(void)
+{
+       int waited = 0;
+
+       while (atomic_read(&in_nmi)) {
+               waited = 1;
+               cpu_relax();
+       }
+
+       if (waited)
+               nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+       mod_code_ip = (void *)ip;
+       mod_code_newcode = new_code;
+
+       /* The buffers need to be visible before we let NMIs write them */
+       smp_wmb();
+
+       mod_code_write = 1;
+
+       /* Make sure write bit is visible before we wait on NMIs */
+       smp_mb();
+
+       wait_for_nmi();
+
+       /* Make sure all running NMIs have finished before we write the code */
+       smp_mb();
+
+       ftrace_mod_code();
+
+       /* Make sure the write happens before clearing the bit */
+       smp_wmb();
+
+       mod_code_write = 0;
+
+       /* make sure NMIs see the cleared bit */
+       smp_mb();
+
+       wait_for_nmi();
+
+       return mod_code_status;
+}
+
+
 int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                   unsigned char *new_code)
@@ -81,7 +208,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                return -EINVAL;
 
        /* replace the text with the new text */
-       if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+       if (do_ftrace_mod_code(ip, new_code))
                return -EPERM;
 
        sync_core();
index 59f89b434b45fbb80df02bafe3f54122a8738b50..0a21b7aab9dc404bc333f3d2bd680f4e3bb10fa6 100644 (file)
@@ -8,9 +8,8 @@ obj-$(CONFIG_X86_PTDUMP)        += dump_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)          += highmem_32.o
 
-obj-$(CONFIG_MMIOTRACE_HOOKS)  += kmmio.o
 obj-$(CONFIG_MMIOTRACE)                += mmiotrace.o
-mmiotrace-y                    := pf_in.o mmio-mod.o
+mmiotrace-y                    := kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)   += testmmiotrace.o
 
 obj-$(CONFIG_NUMA)             += numa_$(BITS).o
index 31e8730fa2463214f36c2f6b3df9d0f75f6be346..4152d3c3b13801c5dc2854ae614ebbba24263a4d 100644 (file)
@@ -53,7 +53,7 @@
 
 static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
 {
-#ifdef CONFIG_MMIOTRACE_HOOKS
+#ifdef CONFIG_MMIOTRACE
        if (unlikely(is_kmmio_active()))
                if (kmmio_handler(regs, addr) == 1)
                        return -1;
index 703eb53cfa2b2a1512b7ce97d9c1da218ad9ac0f..e46a7b34037cc29ed0dbf5ded44e694badff8957 100644 (file)
@@ -74,6 +74,9 @@ extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
 
+/* May be defined in arch */
+extern int ftrace_arch_read_dyn_info(char *buf, int size);
+
 /**
  * ftrace_modify_code - modify code segment
  * @ip: the address of the code segment
@@ -181,6 +184,8 @@ static inline void __ftrace_enabled_restore(int enabled)
 #endif
 
 #ifdef CONFIG_TRACING
+extern int ftrace_dump_on_oops;
+
 extern void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
 
index 181006cc94a03ecd29630d80d91762589fc1c316..0087cb43becf9059676f3396f5297aab796b3968 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/smp_lock.h>
 #include <linux/lockdep.h>
 #include <asm/hardirq.h>
+#include <asm/ftrace.h>
 #include <asm/system.h>
 
 /*
@@ -161,7 +162,17 @@ extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
-#define nmi_enter()            do { lockdep_off(); __irq_enter(); } while (0)
-#define nmi_exit()             do { __irq_exit(); lockdep_on(); } while (0)
+#define nmi_enter()                            \
+       do {                                    \
+               ftrace_nmi_enter();             \
+               lockdep_off();                  \
+               __irq_enter();                  \
+       } while (0)
+#define nmi_exit()                             \
+       do {                                    \
+               __irq_exit();                   \
+               lockdep_on();                   \
+               ftrace_nmi_exit();              \
+       } while (0)
 
 #endif /* LINUX_HARDIRQ_H */
index 889196c7fbb1e77cc5b4561e2b0b7f937b864434..4cf45472d9f53fae95a1c4e98a34339598d41c4b 100644 (file)
@@ -136,8 +136,6 @@ extern marker_probe_func __mark_empty_function;
 
 extern void marker_probe_cb(const struct marker *mdata,
        void *call_private, ...);
-extern void marker_probe_cb_noarg(const struct marker *mdata,
-       void *call_private, ...);
 
 /*
  * Connect a probe to a marker.
index e9c6b2bc9400627cf183382ee55933333f0ee83b..2898b647d415cc4e35d512752548a8834f08ffe6 100644 (file)
@@ -43,6 +43,7 @@ static DEFINE_MUTEX(markers_mutex);
  */
 #define MARKER_HASH_BITS 6
 #define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
+static struct hlist_head marker_table[MARKER_TABLE_SIZE];
 
 /*
  * Note about RCU :
@@ -64,11 +65,10 @@ struct marker_entry {
        void *oldptr;
        int rcu_pending;
        unsigned char ptype:1;
+       unsigned char format_allocated:1;
        char name[0];   /* Contains name'\0'format'\0' */
 };
 
-static struct hlist_head marker_table[MARKER_TABLE_SIZE];
-
 /**
  * __mark_empty_function - Empty probe callback
  * @probe_private: probe private data
@@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(marker_probe_cb);
  *
  * Should be connected to markers "MARK_NOARGS".
  */
-void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
+static void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 {
        va_list args;   /* not initialized */
        char ptype;
@@ -197,7 +197,6 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
        }
        rcu_read_unlock_sched();
 }
-EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
 
 static void free_old_closure(struct rcu_head *head)
 {
@@ -416,6 +415,7 @@ static struct marker_entry *add_marker(const char *name, const char *format)
        e->single.probe_private = NULL;
        e->multi = NULL;
        e->ptype = 0;
+       e->format_allocated = 0;
        e->refcount = 0;
        e->rcu_pending = 0;
        hlist_add_head(&e->hlist, head);
@@ -447,6 +447,8 @@ static int remove_marker(const char *name)
        if (e->single.func != __mark_empty_function)
                return -EBUSY;
        hlist_del(&e->hlist);
+       if (e->format_allocated)
+               kfree(e->format);
        /* Make sure the call_rcu has been executed */
        if (e->rcu_pending)
                rcu_barrier_sched();
@@ -457,57 +459,34 @@ static int remove_marker(const char *name)
 /*
  * Set the mark_entry format to the format found in the element.
  */
-static int marker_set_format(struct marker_entry **entry, const char *format)
+static int marker_set_format(struct marker_entry *entry, const char *format)
 {
-       struct marker_entry *e;
-       size_t name_len = strlen((*entry)->name) + 1;
-       size_t format_len = strlen(format) + 1;
-
-
-       e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
-                       GFP_KERNEL);
-       if (!e)
+       entry->format = kstrdup(format, GFP_KERNEL);
+       if (!entry->format)
                return -ENOMEM;
-       memcpy(&e->name[0], (*entry)->name, name_len);
-       e->format = &e->name[name_len];
-       memcpy(e->format, format, format_len);
-       if (strcmp(e->format, MARK_NOARGS) == 0)
-               e->call = marker_probe_cb_noarg;
-       else
-               e->call = marker_probe_cb;
-       e->single = (*entry)->single;
-       e->multi = (*entry)->multi;
-       e->ptype = (*entry)->ptype;
-       e->refcount = (*entry)->refcount;
-       e->rcu_pending = 0;
-       hlist_add_before(&e->hlist, &(*entry)->hlist);
-       hlist_del(&(*entry)->hlist);
-       /* Make sure the call_rcu has been executed */
-       if ((*entry)->rcu_pending)
-               rcu_barrier_sched();
-       kfree(*entry);
-       *entry = e;
+       entry->format_allocated = 1;
+
        trace_mark(core_marker_format, "name %s format %s",
-                       e->name, e->format);
+                       entry->name, entry->format);
        return 0;
 }
 
 /*
  * Sets the probe callback corresponding to one marker.
  */
-static int set_marker(struct marker_entry **entry, struct marker *elem,
+static int set_marker(struct marker_entry *entry, struct marker *elem,
                int active)
 {
        int ret;
-       WARN_ON(strcmp((*entry)->name, elem->name) != 0);
+       WARN_ON(strcmp(entry->name, elem->name) != 0);
 
-       if ((*entry)->format) {
-               if (strcmp((*entry)->format, elem->format) != 0) {
+       if (entry->format) {
+               if (strcmp(entry->format, elem->format) != 0) {
                        printk(KERN_NOTICE
                                "Format mismatch for probe %s "
                                "(%s), marker (%s)\n",
-                               (*entry)->name,
-                               (*entry)->format,
+                               entry->name,
+                               entry->format,
                                elem->format);
                        return -EPERM;
                }
@@ -523,34 +502,33 @@ static int set_marker(struct marker_entry **entry, struct marker *elem,
         * pass from a "safe" callback (with argument) to an "unsafe"
         * callback (does not set arguments).
         */
-       elem->call = (*entry)->call;
+       elem->call = entry->call;
        /*
         * Sanity check :
         * We only update the single probe private data when the ptr is
         * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
         */
        WARN_ON(elem->single.func != __mark_empty_function
-               && elem->single.probe_private
-               != (*entry)->single.probe_private &&
-               !elem->ptype);
-       elem->single.probe_private = (*entry)->single.probe_private;
+               && elem->single.probe_private != entry->single.probe_private
+               && !elem->ptype);
+       elem->single.probe_private = entry->single.probe_private;
        /*
         * Make sure the private data is valid when we update the
         * single probe ptr.
         */
        smp_wmb();
-       elem->single.func = (*entry)->single.func;
+       elem->single.func = entry->single.func;
        /*
         * We also make sure that the new probe callbacks array is consistent
         * before setting a pointer to it.
         */
-       rcu_assign_pointer(elem->multi, (*entry)->multi);
+       rcu_assign_pointer(elem->multi, entry->multi);
        /*
         * Update the function or multi probe array pointer before setting the
         * ptype.
         */
        smp_wmb();
-       elem->ptype = (*entry)->ptype;
+       elem->ptype = entry->ptype;
        elem->state = active;
 
        return 0;
@@ -594,8 +572,7 @@ void marker_update_probe_range(struct marker *begin,
        for (iter = begin; iter < end; iter++) {
                mark_entry = get_marker(iter->name);
                if (mark_entry) {
-                       set_marker(&mark_entry, iter,
-                                       !!mark_entry->refcount);
+                       set_marker(mark_entry, iter, !!mark_entry->refcount);
                        /*
                         * ignore error, continue
                         */
@@ -657,7 +634,7 @@ int marker_probe_register(const char *name, const char *format,
                        ret = PTR_ERR(entry);
        } else if (format) {
                if (!entry->format)
-                       ret = marker_set_format(&entry, format);
+                       ret = marker_set_format(entry, format);
                else if (strcmp(entry->format, format))
                        ret = -EPERM;
        }
@@ -848,8 +825,6 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
                        if (!e->ptype) {
                                if (num == 0 && e->single.func == probe)
                                        return e->single.probe_private;
-                               else
-                                       break;
                        } else {
                                struct marker_probe_closure *closure;
                                int match = 0;
@@ -861,6 +836,7 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
                                                return closure[i].probe_private;
                                }
                        }
+                       break;
                }
        }
        return ERR_PTR(-ENOENT);
index 9d048fa2d902ec2b768633afd4ecbaaafc01ec03..6b6b727258b52d36b2f0cda8c804f4fa8d59e62b 100644 (file)
@@ -484,6 +484,16 @@ static struct ctl_table kern_table[] = {
                .proc_handler   = &ftrace_enable_sysctl,
        },
 #endif
+#ifdef CONFIG_TRACING
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "ftrace_dump_on_oops",
+               .data           = &ftrace_dump_on_oops,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+#endif
 #ifdef CONFIG_MODULES
        {
                .ctl_name       = KERN_MODPROBE,
index b58f43bec3636e050bc6129d1fa58341aac1c946..33dbefd471e88f9571f299f92b433188dd6697de 100644 (file)
@@ -25,7 +25,7 @@ config TRACING
        bool
        select DEBUG_FS
        select RING_BUFFER
-       select STACKTRACE
+       select STACKTRACE if STACKTRACE_SUPPORT
        select TRACEPOINTS
        select NOP_TRACER
 
index 8a499e2adaec2554218051c730f77ccc784ef184..e4c40c868d67fb0f3028d5bf2ef3f806b23ab62f 100644 (file)
@@ -64,6 +64,37 @@ static cpumask_t __read_mostly               tracing_buffer_mask;
 
 static int tracing_disabled = 1;
 
+/*
+ * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
+ *
+ * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
+ * is set, then ftrace_dump is called. This will output the contents
+ * of the ftrace buffers to the console.  This is very useful for
+ * capturing traces that lead to crashes and outputting it to a
+ * serial console.
+ *
+ * It is default off, but you can enable it with either specifying
+ * "ftrace_dump_on_oops" in the kernel command line, or setting
+ * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ */
+int ftrace_dump_on_oops;
+
+static int tracing_set_tracer(char *buf);
+
+static int __init set_ftrace(char *str)
+{
+       tracing_set_tracer(str);
+       return 1;
+}
+__setup("ftrace", set_ftrace);
+
+static int __init set_ftrace_dump_on_oops(char *str)
+{
+       ftrace_dump_on_oops = 1;
+       return 1;
+}
+__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
+
 long
 ns2usecs(cycle_t nsec)
 {
@@ -705,6 +736,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
                               unsigned long flags,
                               int skip, int pc)
 {
+#ifdef CONFIG_STACKTRACE
        struct ring_buffer_event *event;
        struct stack_entry *entry;
        struct stack_trace trace;
@@ -730,6 +762,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
 
        save_stack_trace(&trace);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
 }
 
 void __trace_stack(struct trace_array *tr,
@@ -2372,29 +2405,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
-static ssize_t
-tracing_set_trace_write(struct file *filp, const char __user *ubuf,
-                       size_t cnt, loff_t *ppos)
+static int tracing_set_tracer(char *buf)
 {
        struct trace_array *tr = &global_trace;
        struct tracer *t;
-       char buf[max_tracer_type_len+1];
-       int i;
-       size_t ret;
-
-       ret = cnt;
-
-       if (cnt > max_tracer_type_len)
-               cnt = max_tracer_type_len;
-
-       if (copy_from_user(&buf, ubuf, cnt))
-               return -EFAULT;
-
-       buf[cnt] = 0;
-
-       /* strip ending whitespace. */
-       for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
-               buf[i] = 0;
+       int ret = 0;
 
        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
@@ -2418,6 +2433,33 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
  out:
        mutex_unlock(&trace_types_lock);
 
+       return ret;
+}
+
+static ssize_t
+tracing_set_trace_write(struct file *filp, const char __user *ubuf,
+                       size_t cnt, loff_t *ppos)
+{
+       char buf[max_tracer_type_len+1];
+       int i;
+       size_t ret;
+
+       if (cnt > max_tracer_type_len)
+               cnt = max_tracer_type_len;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       /* strip ending whitespace. */
+       for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+               buf[i] = 0;
+
+       ret = tracing_set_tracer(buf);
+       if (!ret)
+               ret = cnt;
+
        if (ret > 0)
                filp->f_pos += ret;
 
@@ -2820,22 +2862,38 @@ static struct file_operations tracing_mark_fops = {
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+int __weak ftrace_arch_read_dyn_info(char *buf, int size)
+{
+       return 0;
+}
+
 static ssize_t
-tracing_read_long(struct file *filp, char __user *ubuf,
+tracing_read_dyn_info(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
 {
+       static char ftrace_dyn_info_buffer[1024];
+       static DEFINE_MUTEX(dyn_info_mutex);
        unsigned long *p = filp->private_data;
-       char buf[64];
+       char *buf = ftrace_dyn_info_buffer;
+       int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
        int r;
 
-       r = sprintf(buf, "%ld\n", *p);
+       mutex_lock(&dyn_info_mutex);
+       r = sprintf(buf, "%ld ", *p);
 
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+       r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
+       buf[r++] = '\n';
+
+       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+
+       mutex_unlock(&dyn_info_mutex);
+
+       return r;
 }
 
-static struct file_operations tracing_read_long_fops = {
+static struct file_operations tracing_dyn_info_fops = {
        .open           = tracing_open_generic,
-       .read           = tracing_read_long,
+       .read           = tracing_read_dyn_info,
 };
 #endif
 
@@ -2944,7 +3002,7 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
-                                   &tracing_read_long_fops);
+                                   &tracing_dyn_info_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'dyn_ftrace_total_info' entry\n");
@@ -3025,7 +3083,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk);
 static int trace_panic_handler(struct notifier_block *this,
                               unsigned long event, void *unused)
 {
-       ftrace_dump();
+       if (ftrace_dump_on_oops)
+               ftrace_dump();
        return NOTIFY_OK;
 }
 
@@ -3041,7 +3100,8 @@ static int trace_die_handler(struct notifier_block *self,
 {
        switch (val) {
        case DIE_OOPS:
-               ftrace_dump();
+               if (ftrace_dump_on_oops)
+                       ftrace_dump();
                break;
        default:
                break;
@@ -3082,7 +3142,6 @@ trace_printk_seq(struct trace_seq *s)
        trace_seq_reset(s);
 }
 
-
 void ftrace_dump(void)
 {
        static DEFINE_SPINLOCK(ftrace_dump_lock);
index 468fbc9016c7b0773db9073a28f655bdf31e372d..7a176773af85a9fee23db5ff6e23edb783c74009 100644 (file)
@@ -198,16 +198,10 @@ cmd_modversions =                                                 \
        fi;
 endif
 
-ifdef CONFIG_64BIT
-arch_bits = 64
-else
-arch_bits = 32
-endif
-
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
-cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl \
-       "$(ARCH)" "$(arch_bits)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" \
-       "$(NM)" "$(RM)" "$(MV)" "$(@)";
+cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
+       "$(if $(CONFIG_64BIT),64,32)" \
+       "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" "$(@)";
 endif
 
 define rule_cc_o_c
diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py
new file mode 100644 (file)
index 0000000..902f9a9
--- /dev/null
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+
+"""
+Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
+Licensed under the terms of the GNU GPL License version 2
+
+This script parses a trace provided by the function tracer in
+kernel/trace/trace_functions.c
+The resulted trace is processed into a tree to produce a more human
+view of the call stack by drawing textual but hierarchical tree of
+calls. Only the functions' names and the call time are provided.
+
+Usage:
+       Be sure that you have CONFIG_FUNCTION_TRACER
+       # mkdir /debug
+       # mount -t debugfs debugfs /debug
+       # echo function > /debug/tracing/current_tracer
+       $ cat /debug/tracing/trace_pipe > ~/raw_trace_func
+       Wait a while, but not too long; the script is a bit slow.
+       Break the pipe (Ctrl + Z)
+       $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
+       Then you have your drawn trace in draw_functrace
+"""
+
+
+import sys, re
+
+class CallTree:
+       """ This class provides a tree representation of the functions
+               call stack. If a function has no parent in the kernel (interrupt,
+               syscall, kernel thread...) then it is attached to a virtual parent
+               called ROOT.
+       """
+       ROOT = None
+
+       def __init__(self, func, time = None, parent = None):
+               self._func = func
+               self._time = time
+               if parent is None:
+                       self._parent = CallTree.ROOT
+               else:
+                       self._parent = parent
+               self._children = []
+
+       def calls(self, func, calltime):
+               """ If a function calls another one, call this method to insert it
+                       into the tree at the appropriate place.
+                       @return: A reference to the newly created child node.
+               """
+               child = CallTree(func, calltime, self)
+               self._children.append(child)
+               return child
+
+       def getParent(self, func):
+               """ Retrieve the last parent of the current node that
+                       has the name given by func. If this function is not
+                       on a parent, then create it as new child of root
+                       @return: A reference to the parent.
+               """
+               tree = self
+               while tree != CallTree.ROOT and tree._func != func:
+                       tree = tree._parent
+               if tree == CallTree.ROOT:
+                       child = CallTree.ROOT.calls(func, None)
+                       return child
+               return tree
+
+       def __repr__(self):
+               return self.__toString("", True)
+
+       def __toString(self, branch, lastChild):
+               if self._time is not None:
+                       s = "%s----%s (%s)\n" % (branch, self._func, self._time)
+               else:
+                       s = "%s----%s\n" % (branch, self._func)
+
+               i = 0
+               if lastChild:
+                       branch = branch[:-1] + " "
+               while i < len(self._children):
+                       if i != len(self._children) - 1:
+                               s += "%s" % self._children[i].__toString(branch +\
+                                                               "    |", False)
+                       else:
+                               s += "%s" % self._children[i].__toString(branch +\
+                                                               "    |", True)
+                       i += 1
+               return s
+
+class BrokenLineException(Exception):
+       """If the last line is not complete because of the pipe breakage,
+          we want to stop the processing and ignore this line.
+       """
+       pass
+
+class CommentLineException(Exception):
+       """ If the line is a comment (as in the beginning of the trace file),
+           just ignore it.
+       """
+       pass
+
+
+def parseLine(line):
+       line = line.strip()
+       if line.startswith("#"):
+               raise CommentLineException
+       m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
+       if m is None:
+               raise BrokenLineException
+       return (m.group(1), m.group(2), m.group(3))
+
+
+def main():
+       CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
+       tree = CallTree.ROOT
+
+       for line in sys.stdin:
+               try:
+                       calltime, callee, caller = parseLine(line)
+               except BrokenLineException:
+                       break
+               except CommentLineException:
+                       continue
+               tree = tree.getParent(caller)
+               tree = tree.calls(callee, calltime)
+
+       print CallTree.ROOT
+
+if __name__ == "__main__":
+       main()