Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author		Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 16 Dec 2009 20:02:25 +0000 (12:02 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 16 Dec 2009 20:02:25 +0000 (12:02 -0800)
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: Fix return of trace_dump_stack()
  ksym_tracer: Fix bad cast
  tracing/power: Remove two exports
  tracing: Change event->profile_count to be int type
  tracing: Simplify trace_option_write()
  tracing: Remove useless trace option
  tracing: Use seq file for trace_clock
  tracing: Use seq file for trace_options
  function-graph: Allow writing the same val to set_graph_function
  ftrace: Call trace_parser_clear() properly
  ftrace: Return EINVAL when writing invalid val to set_ftrace_filter
  tracing: Move a printk out of ftrace_raw_reg_event_foo()
  tracing: Pull up calls to trace_define_common_fields()
  tracing: Extract duplicate ftrace_raw_init_event_foo()
  ftrace.h: Use common pr_info fmt string
  tracing: Add stack trace to irqsoff tracer
  tracing: Add trace_dump_stack()
  ring-buffer: Move resize integrity check under reader lock
  ring-buffer: Use sync sched protection on ring buffer resizing
  tracing: Fix wrong usage of strstrip in trace_ksyms

include/linux/kernel.h
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h

diff --combined include/linux/kernel.h
index 4d9c916d06d91b2686b42ca68b57c89837f65b9c,f1dc752da0d2a5dc0476ec2ed85b9f5397ec3676..3fc9f5aab5f8fd5f6deb82d464bf78f72940b40d
@@@ -251,10 -251,10 +251,10 @@@ extern int printk_delay_msec
   * Print a one-time message (analogous to WARN_ONCE() et al):
   */
  #define printk_once(x...) ({                  \
 -      static bool __print_once = true;        \
 +      static bool __print_once;               \
                                                \
 -      if (__print_once) {                     \
 -              __print_once = false;           \
 +      if (!__print_once) {                    \
 +              __print_once = true;            \
                printk(x);                      \
        }                                       \
  })
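
This hunk flips the sense of the flag: the static bool now starts out false (zero-initialized, presumably so it can sit in .bss rather than initialized data) and is set to true once the message has gone out. A usage sketch (hypothetical caller, not part of this patch) — only the first call actually reaches printk():

        static int foo_init(void)
        {
                /* hypothetical: warn once about a deprecated setup path */
                printk_once(KERN_INFO "foo: using legacy calibration\n");
                return 0;
        }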
@@@ -397,57 -397,14 +397,57 @@@ static inline char *pack_hex_byte(char 
        printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
  #elif defined(CONFIG_DYNAMIC_DEBUG)
  /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
 -#define pr_debug(fmt, ...) do { \
 -      dynamic_pr_debug(fmt, ##__VA_ARGS__); \
 -      } while (0)
 +#define pr_debug(fmt, ...) \
 +      dynamic_pr_debug(fmt, ##__VA_ARGS__)
  #else
  #define pr_debug(fmt, ...) \
        ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
  #endif
  
 +/*
 + * ratelimited messages with local ratelimit_state,
 + * no local ratelimit_state used in the !PRINTK case
 + */
 +#ifdef CONFIG_PRINTK
 +#define printk_ratelimited(fmt, ...)  ({              \
 +      static struct ratelimit_state _rs = {           \
 +              .interval = DEFAULT_RATELIMIT_INTERVAL, \
 +              .burst = DEFAULT_RATELIMIT_BURST,       \
 +      };                                              \
 +                                                      \
 +      if (!__ratelimit(&_rs))                         \
 +              printk(fmt, ##__VA_ARGS__);             \
 +})
 +#else
 +/* No effect, but we still get type checking even in the !PRINTK case: */
 +#define printk_ratelimited printk
 +#endif
 +
 +#define pr_emerg_ratelimited(fmt, ...) \
 +      printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
 +#define pr_alert_ratelimited(fmt, ...) \
 +      printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
 +#define pr_crit_ratelimited(fmt, ...) \
 +      printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
 +#define pr_err_ratelimited(fmt, ...) \
 +      printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 +#define pr_warning_ratelimited(fmt, ...) \
 +      printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
 +#define pr_notice_ratelimited(fmt, ...) \
 +      printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 +#define pr_info_ratelimited(fmt, ...) \
 +      printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 +/* no pr_cont_ratelimited, don't do that... */
 +/* If you are writing a driver, please use dev_dbg instead */
 +#if defined(DEBUG)
 +#define pr_debug_ratelimited(fmt, ...) \
 +      printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 +#else
 +#define pr_debug_ratelimited(fmt, ...) \
 +      ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
 +                                   ##__VA_ARGS__); 0; })
 +#endif
 +
  /*
   * General tracing related utility functions - trace_printk(),
   * tracing_on/tracing_off and tracing_start()/tracing_stop
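
Each printk_ratelimited() call site above gets its own static ratelimit_state, so the DEFAULT_RATELIMIT_INTERVAL/DEFAULT_RATELIMIT_BURST budget applies per message rather than globally, and the pr_*_ratelimited() wrappers only prepend the log level. A hypothetical usage sketch (driver code, not from this patch):

        static irqreturn_t foo_irq(int irq, void *dev_id)
        {
                struct foo_priv *priv = dev_id;         /* hypothetical driver type */

                if (foo_fifo_overrun(priv))             /* hypothetical helper */
                        pr_warning_ratelimited("foo: RX overrun on irq %d\n", irq);

                return IRQ_HANDLED;
        }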
@@@ -535,6 -492,8 +535,8 @@@ extern in
  __trace_printk(unsigned long ip, const char *fmt, ...)
        __attribute__ ((format (printf, 2, 3)));
  
+ extern void trace_dump_stack(void);
  /*
   * The double __builtin_constant_p is because gcc will give us an error
   * if we try to allocate the static variable to fmt if it is not a
@@@ -568,6 -527,7 +570,7 @@@ trace_printk(const char *fmt, ...) __at
  static inline void tracing_start(void) { }
  static inline void tracing_stop(void) { }
  static inline void ftrace_off_permanent(void) { }
+ static inline void trace_dump_stack(void) { }
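
Together with the extern declaration earlier in the header, this gives the usual split: a real trace_dump_stack() when tracing is built in, and an empty static inline otherwise, so callers never need their own #ifdefs. Condensed (the #ifdef CONFIG_TRACING / #else / #endif framing is the surrounding context not shown in these hunks):

        #ifdef CONFIG_TRACING
        extern void trace_dump_stack(void);
        #else
        static inline void trace_dump_stack(void) { }
        #endif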
  static inline int
  trace_printk(const char *fmt, ...)
  {
index f58c9ad15830777ded600f8af31138b31d17d172,eccb4cf1e998ef8fe178f9a9286ff28217d7cdfb..2326b04c95c493fec48418746c0194980ad4b4fa
@@@ -423,7 -423,7 +423,7 @@@ struct ring_buffer_per_cpu 
        int                             cpu;
        struct ring_buffer              *buffer;
        spinlock_t                      reader_lock;    /* serialize readers */
 -      raw_spinlock_t                  lock;
 +      arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
@@@ -998,7 -998,7 +998,7 @@@ rb_allocate_cpu_buffer(struct ring_buff
        cpu_buffer->buffer = buffer;
        spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 -      cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 +      cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
  
        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                            GFP_KERNEL, cpu_to_node(cpu));
@@@ -1193,9 -1193,6 +1193,6 @@@ rb_remove_pages(struct ring_buffer_per_
        struct list_head *p;
        unsigned i;
  
-       atomic_inc(&cpu_buffer->record_disabled);
-       synchronize_sched();
        spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);
  
                return;
  
        rb_reset_cpu(cpu_buffer);
-       spin_unlock_irq(&cpu_buffer->reader_lock);
        rb_check_pages(cpu_buffer);
  
-       atomic_dec(&cpu_buffer->record_disabled);
+       spin_unlock_irq(&cpu_buffer->reader_lock);
  }
  
  static void
@@@ -1227,9 -1221,6 +1221,6 @@@ rb_insert_pages(struct ring_buffer_per_
        struct list_head *p;
        unsigned i;
  
-       atomic_inc(&cpu_buffer->record_disabled);
-       synchronize_sched();
        spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);
  
                list_add_tail(&bpage->list, cpu_buffer->pages);
        }
        rb_reset_cpu(cpu_buffer);
-       spin_unlock_irq(&cpu_buffer->reader_lock);
        rb_check_pages(cpu_buffer);
  
-       atomic_dec(&cpu_buffer->record_disabled);
+       spin_unlock_irq(&cpu_buffer->reader_lock);
  }
  
  /**
   * @buffer: the buffer to resize.
   * @size: the new size.
   *
-  * The tracer is responsible for making sure that the buffer is
-  * not being used while changing the size.
-  * Note: We may be able to change the above requirement by using
-  *  RCU synchronizations.
-  *
   * Minimum size is 2 * BUF_PAGE_SIZE.
   *
   * Returns -1 on failure.
@@@ -1290,6 -1274,11 +1274,11 @@@ int ring_buffer_resize(struct ring_buff
        if (size == buffer_size)
                return size;
  
+       atomic_inc(&buffer->record_disabled);
+       /* Make sure all writers are done with this buffer. */
+       synchronize_sched();
        mutex_lock(&buffer->mutex);
        get_online_cpus();
  
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
  
+       atomic_dec(&buffer->record_disabled);
        return size;
  
   free_pages:
        }
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
+       atomic_dec(&buffer->record_disabled);
        return -ENOMEM;
  
        /*
   out_fail:
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
+       atomic_dec(&buffer->record_disabled);
        return -1;
  }
  EXPORT_SYMBOL_GPL(ring_buffer_resize);
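
The net effect of the resize hunks: writer quiescing moves out of rb_remove_pages()/rb_insert_pages() and up into ring_buffer_resize() itself, and the page-list integrity check now runs while the reader lock is still held. In outline (condensed from the patch, error paths abbreviated — recording is re-enabled on every exit path, including -ENOMEM and out_fail):

        atomic_inc(&buffer->record_disabled);   /* stop new writers */
        synchronize_sched();                    /* wait out writers already in flight */

        mutex_lock(&buffer->mutex);
        get_online_cpus();
        /* ... add or remove pages on each per-cpu buffer ... */
        put_online_cpus();
        mutex_unlock(&buffer->mutex);

        atomic_dec(&buffer->record_disabled);   /* writers may resume */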
@@@ -2834,7 -2827,7 +2827,7 @@@ rb_get_reader_page(struct ring_buffer_p
        int ret;
  
        local_irq_save(flags);
 -      __raw_spin_lock(&cpu_buffer->lock);
 +      arch_spin_lock(&cpu_buffer->lock);
  
   again:
        /*
        goto again;
  
   out:
 -      __raw_spin_unlock(&cpu_buffer->lock);
 +      arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
  
        return reader;
@@@ -3286,9 -3279,9 +3279,9 @@@ ring_buffer_read_start(struct ring_buff
        synchronize_sched();
  
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 -      __raw_spin_lock(&cpu_buffer->lock);
 +      arch_spin_lock(&cpu_buffer->lock);
        rb_iter_reset(iter);
 -      __raw_spin_unlock(&cpu_buffer->lock);
 +      arch_spin_unlock(&cpu_buffer->lock);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  
        return iter;
@@@ -3408,11 -3401,11 +3401,11 @@@ void ring_buffer_reset_cpu(struct ring_
        if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
                goto out;
  
 -      __raw_spin_lock(&cpu_buffer->lock);
 +      arch_spin_lock(&cpu_buffer->lock);
  
        rb_reset_cpu(cpu_buffer);
  
 -      __raw_spin_unlock(&cpu_buffer->lock);
 +      arch_spin_unlock(&cpu_buffer->lock);
  
   out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
diff --combined kernel/trace/trace.c
index 31118ae16f03faef7f3e5b0fe7301e9ae56fc3b6,ee61915935d55568bc1455d52d0619ab5850e17a..06ba26747d7e223b44ee67935b9ea33ff25752f8
@@@ -86,17 -86,17 +86,17 @@@ static int dummy_set_flag(u32 old_flags
   */
  static int tracing_disabled = 1;
  
 -DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 +DEFINE_PER_CPU(int, ftrace_cpu_disabled);
  
  static inline void ftrace_disable_cpu(void)
  {
        preempt_disable();
 -      local_inc(&__get_cpu_var(ftrace_cpu_disabled));
 +      __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
  }
  
  static inline void ftrace_enable_cpu(void)
  {
 -      local_dec(&__get_cpu_var(ftrace_cpu_disabled));
 +      __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
        preempt_enable();
  }
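
ftrace_cpu_disabled changes from a per-cpu local_t to a plain per-cpu int, updated with the __this_cpu_*() operations — presumably because the counter is only ever touched on the local CPU with preemption disabled, so the atomic local_t is unnecessary. Condensed, including the read side from trace_function() further down:

        DEFINE_PER_CPU(int, ftrace_cpu_disabled);

        preempt_disable();
        __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
        /* ... and on the read side: ... */
        if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
                return;         /* a reader is using this CPU's buffer, don't trace */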
  
@@@ -203,7 -203,7 +203,7 @@@ cycle_t ftrace_now(int cpu
   */
  static struct trace_array     max_tr;
  
 -static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
 +static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
  
  /* tracer_enabled is used to toggle activation of a tracer */
  static int                    tracer_enabled = 1;
@@@ -313,7 -313,6 +313,6 @@@ static const char *trace_options[] = 
        "bin",
        "block",
        "stacktrace",
-       "sched-tree",
        "trace_printk",
        "ftrace_preempt",
        "branch",
@@@ -493,15 -492,15 +492,15 @@@ static ssize_t trace_seq_to_buffer(stru
   * protected by per_cpu spinlocks. But the action of the swap
   * needs its own lock.
   *
 - * This is defined as a raw_spinlock_t in order to help
 + * This is defined as a arch_spinlock_t in order to help
   * with performance when lockdep debugging is enabled.
   *
   * It is also used in other places outside the update_max_tr
   * so it needs to be defined outside of the
   * CONFIG_TRACER_MAX_TRACE.
   */
 -static raw_spinlock_t ftrace_max_lock =
 -      (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 +static arch_spinlock_t ftrace_max_lock =
 +      (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
  
  #ifdef CONFIG_TRACER_MAX_TRACE
  unsigned long __read_mostly   tracing_max_latency;
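
Throughout this diff the raw_spinlock_t/__raw_spin_*() users in the tracing code are switched to the renamed arch_spinlock_t/arch_spin_*() API; the comment above keeps its rationale (a low-level lock that stays out of lockdep for performance). Condensed from the patch, the resulting pattern around the max-latency buffer swap:

        static arch_spinlock_t ftrace_max_lock =
                (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

        WARN_ON_ONCE(!irqs_disabled());
        arch_spin_lock(&ftrace_max_lock);
        /* ... swap tr->buffer with the max_tr snapshot buffer ... */
        arch_spin_unlock(&ftrace_max_lock);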
@@@ -555,13 -554,13 +554,13 @@@ update_max_tr(struct trace_array *tr, s
                return;
  
        WARN_ON_ONCE(!irqs_disabled());
 -      __raw_spin_lock(&ftrace_max_lock);
 +      arch_spin_lock(&ftrace_max_lock);
  
        tr->buffer = max_tr.buffer;
        max_tr.buffer = buf;
  
        __update_max_tr(tr, tsk, cpu);
 -      __raw_spin_unlock(&ftrace_max_lock);
 +      arch_spin_unlock(&ftrace_max_lock);
  }
  
  /**
@@@ -581,7 -580,7 +580,7 @@@ update_max_tr_single(struct trace_arra
                return;
  
        WARN_ON_ONCE(!irqs_disabled());
 -      __raw_spin_lock(&ftrace_max_lock);
 +      arch_spin_lock(&ftrace_max_lock);
  
        ftrace_disable_cpu();
  
        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
  
        __update_max_tr(tr, tsk, cpu);
 -      __raw_spin_unlock(&ftrace_max_lock);
 +      arch_spin_unlock(&ftrace_max_lock);
  }
  #endif /* CONFIG_TRACER_MAX_TRACE */
  
@@@ -802,7 -801,7 +801,7 @@@ static unsigned map_pid_to_cmdline[PID_
  static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
  static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
  static int cmdline_idx;
 -static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
 +static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
  
  /* temporary disable recording */
  static atomic_t trace_record_cmdline_disabled __read_mostly;
@@@ -915,7 -914,7 +914,7 @@@ static void trace_save_cmdline(struct t
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
 -      if (!__raw_spin_trylock(&trace_cmdline_lock))
 +      if (!arch_spin_trylock(&trace_cmdline_lock))
                return;
  
        idx = map_pid_to_cmdline[tsk->pid];
  
        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
  
 -      __raw_spin_unlock(&trace_cmdline_lock);
 +      arch_spin_unlock(&trace_cmdline_lock);
  }
  
  void trace_find_cmdline(int pid, char comm[])
        }
  
        preempt_disable();
 -      __raw_spin_lock(&trace_cmdline_lock);
 +      arch_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
                strcpy(comm, saved_cmdlines[map]);
        else
                strcpy(comm, "<...>");
  
 -      __raw_spin_unlock(&trace_cmdline_lock);
 +      arch_spin_unlock(&trace_cmdline_lock);
        preempt_enable();
  }
  
@@@ -1085,7 -1084,7 +1084,7 @@@ trace_function(struct trace_array *tr
        struct ftrace_entry *entry;
  
        /* If we are reading the ring buffer, don't trace */
 -      if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 +      if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
                return;
  
        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@@ -1151,6 -1150,22 +1150,22 @@@ void __trace_stack(struct trace_array *
        __ftrace_trace_stack(tr->buffer, flags, skip, pc);
  }
  
+ /**
+  * trace_dump_stack - record a stack back trace in the trace buffer
+  */
+ void trace_dump_stack(void)
+ {
+       unsigned long flags;
+       if (tracing_disabled || tracing_selftest_running)
+               return;
+       local_save_flags(flags);
+       /* skipping 3 traces, seems to get us at the caller of this function */
+       __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+ }
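
trace_dump_stack() hands the current context to __ftrace_trace_stack() with a skip of 3 so the recorded trace starts at the caller rather than inside the tracing code (the in-line comment itself hedges that). A hypothetical caller (not from this patch) that records the call chain into the trace buffer instead of spamming the console:

        static void foo_validate(struct foo_state *st)  /* hypothetical type */
        {
                if (st->refcnt < 0) {
                        trace_printk("foo: bad refcount %d\n", st->refcnt);
                        trace_dump_stack();     /* stack lands in the same trace buffer */
                }
        }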
  void
  ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
  {
@@@ -1251,8 -1266,8 +1266,8 @@@ ftrace_special(unsigned long arg1, unsi
   */
  int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
  {
 -      static raw_spinlock_t trace_buf_lock =
 -              (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 +      static arch_spinlock_t trace_buf_lock =
 +              (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        static u32 trace_buf[TRACE_BUF_SIZE];
  
        struct ftrace_event_call *call = &event_bprint;
  
        /* Lockdep uses trace_printk for lock tracing */
        local_irq_save(flags);
 -      __raw_spin_lock(&trace_buf_lock);
 +      arch_spin_lock(&trace_buf_lock);
        len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
  
        if (len > TRACE_BUF_SIZE || len < 0)
                ring_buffer_unlock_commit(buffer, event);
  
  out_unlock:
 -      __raw_spin_unlock(&trace_buf_lock);
 +      arch_spin_unlock(&trace_buf_lock);
        local_irq_restore(flags);
  
  out:
@@@ -1334,7 -1349,7 +1349,7 @@@ int trace_array_printk(struct trace_arr
  int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
  {
 -      static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 +      static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
        static char trace_buf[TRACE_BUF_SIZE];
  
        struct ftrace_event_call *call = &event_print;
  
        pause_graph_tracing();
        raw_local_irq_save(irq_flags);
 -      __raw_spin_lock(&trace_buf_lock);
 +      arch_spin_lock(&trace_buf_lock);
        len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
  
        size = sizeof(*entry) + len + 1;
                ring_buffer_unlock_commit(buffer, event);
  
   out_unlock:
 -      __raw_spin_unlock(&trace_buf_lock);
 +      arch_spin_unlock(&trace_buf_lock);
        raw_local_irq_restore(irq_flags);
        unpause_graph_tracing();
   out:
@@@ -2279,7 -2294,7 +2294,7 @@@ tracing_cpumask_write(struct file *filp
        mutex_lock(&tracing_cpumask_update_lock);
  
        local_irq_disable();
 -      __raw_spin_lock(&ftrace_max_lock);
 +      arch_spin_lock(&ftrace_max_lock);
        for_each_tracing_cpu(cpu) {
                /*
                 * Increase/decrease the disabled counter if we are
                        atomic_dec(&global_trace.data[cpu]->disabled);
                }
        }
 -      __raw_spin_unlock(&ftrace_max_lock);
 +      arch_spin_unlock(&ftrace_max_lock);
        local_irq_enable();
  
        cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@@ -2316,67 -2331,49 +2331,49 @@@ static const struct file_operations tra
        .write          = tracing_cpumask_write,
  };
  
- static ssize_t
- tracing_trace_options_read(struct file *filp, char __user *ubuf,
-                      size_t cnt, loff_t *ppos)
+ static int tracing_trace_options_show(struct seq_file *m, void *v)
  {
        struct tracer_opt *trace_opts;
        u32 tracer_flags;
-       int len = 0;
-       char *buf;
-       int r = 0;
        int i;
  
-       /* calculate max size */
-       for (i = 0; trace_options[i]; i++) {
-               len += strlen(trace_options[i]);
-               len += 3; /* "no" and newline */
-       }
        mutex_lock(&trace_types_lock);
        tracer_flags = current_trace->flags->val;
        trace_opts = current_trace->flags->opts;
  
-       /*
-        * Increase the size with names of options specific
-        * of the current tracer.
-        */
-       for (i = 0; trace_opts[i].name; i++) {
-               len += strlen(trace_opts[i].name);
-               len += 3; /* "no" and newline */
-       }
-       /* +1 for \0 */
-       buf = kmalloc(len + 1, GFP_KERNEL);
-       if (!buf) {
-               mutex_unlock(&trace_types_lock);
-               return -ENOMEM;
-       }
        for (i = 0; trace_options[i]; i++) {
                if (trace_flags & (1 << i))
-                       r += sprintf(buf + r, "%s\n", trace_options[i]);
+                       seq_printf(m, "%s\n", trace_options[i]);
                else
-                       r += sprintf(buf + r, "no%s\n", trace_options[i]);
+                       seq_printf(m, "no%s\n", trace_options[i]);
        }
  
        for (i = 0; trace_opts[i].name; i++) {
                if (tracer_flags & trace_opts[i].bit)
-                       r += sprintf(buf + r, "%s\n",
-                               trace_opts[i].name);
+                       seq_printf(m, "%s\n", trace_opts[i].name);
                else
-                       r += sprintf(buf + r, "no%s\n",
-                               trace_opts[i].name);
+                       seq_printf(m, "no%s\n", trace_opts[i].name);
        }
        mutex_unlock(&trace_types_lock);
  
-       WARN_ON(r >= len + 1);
+       return 0;
+ }
  
-       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ static int __set_tracer_option(struct tracer *trace,
+                              struct tracer_flags *tracer_flags,
+                              struct tracer_opt *opts, int neg)
+ {
+       int ret;
  
-       kfree(buf);
-       return r;
+       ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+       if (ret)
+               return ret;
+       if (neg)
+               tracer_flags->val &= ~opts->bit;
+       else
+               tracer_flags->val |= opts->bit;
+       return 0;
  }
  
  /* Try to assign a tracer specific option */
@@@ -2384,33 -2381,17 +2381,17 @@@ static int set_tracer_option(struct tra
  {
        struct tracer_flags *tracer_flags = trace->flags;
        struct tracer_opt *opts = NULL;
-       int ret = 0, i = 0;
-       int len;
+       int i;
  
        for (i = 0; tracer_flags->opts[i].name; i++) {
                opts = &tracer_flags->opts[i];
-               len = strlen(opts->name);
  
-               if (strncmp(cmp, opts->name, len) == 0) {
-                       ret = trace->set_flag(tracer_flags->val,
-                               opts->bit, !neg);
-                       break;
-               }
+               if (strcmp(cmp, opts->name) == 0)
+                       return __set_tracer_option(trace, trace->flags,
+                                                  opts, neg);
        }
-       /* Not found */
-       if (!tracer_flags->opts[i].name)
-               return -EINVAL;
-       /* Refused to handle */
-       if (ret)
-               return ret;
-       if (neg)
-               tracer_flags->val &= ~opts->bit;
-       else
-               tracer_flags->val |= opts->bit;
  
-       return 0;
+       return -EINVAL;
  }
  
  static void set_tracer_flags(unsigned int mask, int enabled)
@@@ -2430,7 -2411,7 +2411,7 @@@ tracing_trace_options_write(struct fil
                        size_t cnt, loff_t *ppos)
  {
        char buf[64];
-       char *cmp = buf;
+       char *cmp;
        int neg = 0;
        int ret;
        int i;
                return -EFAULT;
  
        buf[cnt] = 0;
+       cmp = strstrip(buf);
  
-       if (strncmp(buf, "no", 2) == 0) {
+       if (strncmp(cmp, "no", 2) == 0) {
                neg = 1;
                cmp += 2;
        }
  
        for (i = 0; trace_options[i]; i++) {
-               int len = strlen(trace_options[i]);
-               if (strncmp(cmp, trace_options[i], len) == 0) {
+               if (strcmp(cmp, trace_options[i]) == 0) {
                        set_tracer_flags(1 << i, !neg);
                        break;
                }
        return cnt;
  }
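
The write path now runs the user buffer through strstrip() before comparing, and the option match becomes an exact strcmp() instead of a prefix strncmp(); without the strip, the newline that "echo stacktrace > trace_options" appends would never match exactly. A sketch of the effect (hypothetical values, not from the patch):

        char buf[64] = "stacktrace\n";          /* what echo typically delivers */
        char *cmp = strstrip(buf);              /* trims leading/trailing whitespace */
        /* strcmp(cmp, "stacktrace") now returns 0; prefix matching with
         * strncmp() is no longer needed to paper over the trailing newline */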
  
+ static int tracing_trace_options_open(struct inode *inode, struct file *file)
+ {
+       if (tracing_disabled)
+               return -ENODEV;
+       return single_open(file, tracing_trace_options_show, NULL);
+ }
  static const struct file_operations tracing_iter_fops = {
-       .open           = tracing_open_generic,
-       .read           = tracing_trace_options_read,
+       .open           = tracing_trace_options_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
        .write          = tracing_trace_options_write,
  };
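
trace_options moves from a hand-rolled kmalloc/sprintf/simple_read_from_buffer() read path to the seq_file single_open() helpers: the show callback just seq_printf()s each option, and read/llseek/release come from seq_read, seq_lseek and single_release. The same conversion is applied to trace_clock below. A minimal sketch of the idiom (hypothetical file, not from the patch):

        static int foo_show(struct seq_file *m, void *v)
        {
                seq_printf(m, "state: %d\n", 42);       /* hypothetical content */
                return 0;
        }

        static int foo_open(struct inode *inode, struct file *file)
        {
                return single_open(file, foo_show, NULL);
        }

        static const struct file_operations foo_fops = {
                .open    = foo_open,
                .read    = seq_read,
                .llseek  = seq_lseek,
                .release = single_release,
        };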
  
@@@ -3133,7 -3122,7 +3122,7 @@@ static void tracing_spd_release_pipe(st
        __free_page(spd->pages[idx]);
  }
  
 -static struct pipe_buf_operations tracing_pipe_buf_ops = {
 +static const struct pipe_buf_operations tracing_pipe_buf_ops = {
        .can_merge              = 0,
        .map                    = generic_pipe_buf_map,
        .unmap                  = generic_pipe_buf_unmap,
@@@ -3392,21 -3381,18 +3381,18 @@@ tracing_mark_write(struct file *filp, c
        return cnt;
  }
  
- static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
-                                 size_t cnt, loff_t *ppos)
+ static int tracing_clock_show(struct seq_file *m, void *v)
  {
-       char buf[64];
-       int bufiter = 0;
        int i;
  
        for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
-               bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+               seq_printf(m,
                        "%s%s%s%s", i ? " " : "",
                        i == trace_clock_id ? "[" : "", trace_clocks[i].name,
                        i == trace_clock_id ? "]" : "");
-       bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+       seq_putc(m, '\n');
  
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+       return 0;
  }
  
  static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
        return cnt;
  }
  
+ static int tracing_clock_open(struct inode *inode, struct file *file)
+ {
+       if (tracing_disabled)
+               return -ENODEV;
+       return single_open(file, tracing_clock_show, NULL);
+ }
  static const struct file_operations tracing_max_lat_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_max_lat_read,
@@@ -3486,8 -3479,10 +3479,10 @@@ static const struct file_operations tra
  };
  
  static const struct file_operations trace_clock_fops = {
-       .open           = tracing_open_generic,
-       .read           = tracing_clock_read,
+       .open           = tracing_clock_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
        .write          = tracing_clock_write,
  };
  
@@@ -3617,7 -3612,7 +3612,7 @@@ static void buffer_pipe_buf_get(struct 
  }
  
  /* Pipe buffer operations for a buffer. */
 -static struct pipe_buf_operations buffer_pipe_buf_ops = {
 +static const struct pipe_buf_operations buffer_pipe_buf_ops = {
        .can_merge              = 0,
        .map                    = generic_pipe_buf_map,
        .unmap                  = generic_pipe_buf_unmap,
@@@ -3948,39 -3943,16 +3943,16 @@@ trace_options_write(struct file *filp, 
        if (ret < 0)
                return ret;
  
-       ret = 0;
-       switch (val) {
-       case 0:
-               /* do nothing if already cleared */
-               if (!(topt->flags->val & topt->opt->bit))
-                       break;
-               mutex_lock(&trace_types_lock);
-               if (current_trace->set_flag)
-                       ret = current_trace->set_flag(topt->flags->val,
-                                                     topt->opt->bit, 0);
-               mutex_unlock(&trace_types_lock);
-               if (ret)
-                       return ret;
-               topt->flags->val &= ~topt->opt->bit;
-               break;
-       case 1:
-               /* do nothing if already set */
-               if (topt->flags->val & topt->opt->bit)
-                       break;
+       if (val != 0 && val != 1)
+               return -EINVAL;
  
+       if (!!(topt->flags->val & topt->opt->bit) != val) {
                mutex_lock(&trace_types_lock);
-               if (current_trace->set_flag)
-                       ret = current_trace->set_flag(topt->flags->val,
-                                                     topt->opt->bit, 1);
+               ret = __set_tracer_option(current_trace, topt->flags,
+                                         topt->opt, val);
                mutex_unlock(&trace_types_lock);
                if (ret)
                        return ret;
-               topt->flags->val |= topt->opt->bit;
-               break;
-       default:
-               return -EINVAL;
        }
  
        *ppos += cnt;
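
The old switch statement collapses into one test: !! normalizes the current flag bit to 0 or 1 so it can be compared directly with the value the user wrote, and __set_tracer_option() (factored out earlier in this diff) is only called when the state actually changes. The test in isolation:

        /* reject anything but 0/1, then flip only on a real change */
        if (val != 0 && val != 1)
                return -EINVAL;
        if (!!(topt->flags->val & topt->opt->bit) != val) {
                /* ... take trace_types_lock and call __set_tracer_option() ... */
        }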
@@@ -4307,8 -4279,8 +4279,8 @@@ trace_printk_seq(struct trace_seq *s
  
  static void __ftrace_dump(bool disable_tracing)
  {
 -      static raw_spinlock_t ftrace_dump_lock =
 -              (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 +      static arch_spinlock_t ftrace_dump_lock =
 +              (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
        unsigned int old_userobj;
  
        /* only one dump */
        local_irq_save(flags);
 -      __raw_spin_lock(&ftrace_dump_lock);
 +      arch_spin_lock(&ftrace_dump_lock);
        if (dump_ran)
                goto out;
  
        }
  
   out:
 -      __raw_spin_unlock(&ftrace_dump_lock);
 +      arch_spin_unlock(&ftrace_dump_lock);
        local_irq_restore(flags);
  }
  
@@@ -4454,7 -4426,7 +4426,7 @@@ __init static int tracer_alloc_buffers(
        /* Allocate the first page for all buffers */
        for_each_tracing_cpu(i) {
                global_trace.data[i] = &per_cpu(global_trace_cpu, i);
 -              max_tr.data[i] = &per_cpu(max_data, i);
 +              max_tr.data[i] = &per_cpu(max_tr_data, i);
        }
  
        trace_init_cmdlines();
diff --combined kernel/trace/trace.h
index a52bed2eedd848ec5e7c249d479baa8d14a98ad6,1b18cb240c165b4cbfe6eee7cc4312fe805ac7b8..4df6a77eb1966bd4ea12779121f12e96ddf1d3f6
@@@ -443,7 -443,7 +443,7 @@@ extern int DYN_FTRACE_TEST_NAME(void)
  
  extern int ring_buffer_expanded;
  extern bool tracing_selftest_disabled;
 -DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
 +DECLARE_PER_CPU(int, ftrace_cpu_disabled);
  
  #ifdef CONFIG_FTRACE_STARTUP_TEST
  extern int trace_selftest_startup_function(struct tracer *trace,
@@@ -597,18 -597,17 +597,17 @@@ enum trace_iterator_flags 
        TRACE_ITER_BIN                  = 0x40,
        TRACE_ITER_BLOCK                = 0x80,
        TRACE_ITER_STACKTRACE           = 0x100,
-       TRACE_ITER_SCHED_TREE           = 0x200,
-       TRACE_ITER_PRINTK               = 0x400,
-       TRACE_ITER_PREEMPTONLY          = 0x800,
-       TRACE_ITER_BRANCH               = 0x1000,
-       TRACE_ITER_ANNOTATE             = 0x2000,
-       TRACE_ITER_USERSTACKTRACE       = 0x4000,
-       TRACE_ITER_SYM_USEROBJ          = 0x8000,
-       TRACE_ITER_PRINTK_MSGONLY       = 0x10000,
-       TRACE_ITER_CONTEXT_INFO         = 0x20000, /* Print pid/cpu/time */
-       TRACE_ITER_LATENCY_FMT          = 0x40000,
-       TRACE_ITER_SLEEP_TIME           = 0x80000,
-       TRACE_ITER_GRAPH_TIME           = 0x100000,
+       TRACE_ITER_PRINTK               = 0x200,
+       TRACE_ITER_PREEMPTONLY          = 0x400,
+       TRACE_ITER_BRANCH               = 0x800,
+       TRACE_ITER_ANNOTATE             = 0x1000,
+       TRACE_ITER_USERSTACKTRACE       = 0x2000,
+       TRACE_ITER_SYM_USEROBJ          = 0x4000,
+       TRACE_ITER_PRINTK_MSGONLY       = 0x8000,
+       TRACE_ITER_CONTEXT_INFO         = 0x10000, /* Print pid/cpu/time */
+       TRACE_ITER_LATENCY_FMT          = 0x20000,
+       TRACE_ITER_SLEEP_TIME           = 0x40000,
+       TRACE_ITER_GRAPH_TIME           = 0x80000,
  };
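
The TRACE_ITER_* values shift down because each bit has to track the index of its name in trace_options[]: option i is toggled as (1 << i), so deleting "sched-tree" (formerly 0x200) moves every later flag down one bit. The matching loop from tracing_trace_options_write() earlier in this diff makes the coupling explicit:

        for (i = 0; trace_options[i]; i++) {
                if (strcmp(cmp, trace_options[i]) == 0) {
                        set_tracer_flags(1 << i, !neg);
                        break;
                }
        }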
  
  /*