]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge tag 'trace-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux...
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 3 Apr 2014 17:26:31 +0000 (10:26 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 3 Apr 2014 17:26:31 +0000 (10:26 -0700)
Pull tracing updates from Steven Rostedt:
 "Most of the changes were largely clean ups, and some documentation.
  But there were a few features that were added:

  Uprobes now work with event triggers and multi buffers and have
  support under ftrace and perf.

  The big feature is that the function tracer can now be used within the
  multi buffer instances.  That is, you can now trace some functions in
  one buffer, others in another buffer, all functions in a third buffer
  and so on.  They are basically agnostic from each other.  This only
  works for the function tracer and not for the function graph trace,
  although you can have the function graph tracer running in the top
  level buffer (or any tracer for that matter) and have different
  function tracing going on in the sub buffers"

* tag 'trace-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (45 commits)
  tracing: Add BUG_ON when stack end location is over written
  tracepoint: Remove unused API functions
  Revert "tracing: Move event storage for array from macro to standalone function"
  ftrace: Constify ftrace_text_reserved
  tracepoints: API doc update to tracepoint_probe_register() return value
  tracepoints: API doc update to data argument
  ftrace: Fix compilation warning about control_ops_free
  ftrace/x86: BUG when ftrace recovery fails
  ftrace: Warn on error when modifying ftrace function
  ftrace: Remove freelist from struct dyn_ftrace
  ftrace: Do not pass data to ftrace_dyn_arch_init
  ftrace: Pass retval through return in ftrace_dyn_arch_init()
  ftrace: Inline the code from ftrace_dyn_table_alloc()
  ftrace: Cleanup of global variables ftrace_new_pgs and ftrace_update_cnt
  tracing: Evaluate len expression only once in __dynamic_array macro
  tracing: Correctly expand len expressions from __dynamic_array macro
  tracing/module: Replace include of tracepoint.h with jump_label.h in module.h
  tracing: Fix event header migrate.h to include tracepoint.h
  tracing: Fix event header writeback.h to include tracepoint.h
  tracing: Warn if a tracepoint is not set via debugfs
  ...

1  2 
arch/mips/kernel/ftrace.c
arch/powerpc/kernel/ftrace.c
include/linux/ftrace_event.h
include/linux/tracepoint.h
include/trace/events/writeback.h
include/trace/ftrace.h
kernel/trace/blktrace.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_irqsoff.c
kernel/tracepoint.c

index 74fe73506d8f9ce8a1eb5f3536df282829f02c7e,1ba7afe6ab74a55d93e95139b115d1980441c254..60e7e5e45af15135375a0183cca9759ff06442c0
@@@ -90,7 -90,6 +90,7 @@@ static inline void ftrace_dyn_arch_init
  static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
  {
        int faulted;
 +      mm_segment_t old_fs;
  
        /* *(unsigned int *)ip = new_code; */
        safe_store_code(new_code, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;
  
 +      old_fs = get_fs();
 +      set_fs(get_ds());
        flush_icache_range(ip, ip + 8);
 +      set_fs(old_fs);
  
        return 0;
  }
@@@ -115,10 -111,11 +115,10 @@@ static int ftrace_modify_code_2(unsigne
        safe_store_code(new_code1, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;
 -      ip += 4;
 -      safe_store_code(new_code2, ip, faulted);
 +      safe_store_code(new_code2, ip + 4, faulted);
        if (unlikely(faulted))
                return -EFAULT;
 -      flush_icache_range(ip, ip + 8); /* original ip + 12 */
 +      flush_icache_range(ip, ip + 8);
        return 0;
  }
  #endif
@@@ -201,7 -198,7 +201,7 @@@ int ftrace_update_ftrace_func(ftrace_fu
        return ftrace_modify_code(FTRACE_CALL_IP, new);
  }
  
- int __init ftrace_dyn_arch_init(void *data)
+ int __init ftrace_dyn_arch_init(void)
  {
        /* Encode the instructions when booting */
        ftrace_dyn_arch_init_insns();
        /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
        ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
  
-       /* The return code is retured via data */
-       *(unsigned long *)data = 0;
        return 0;
  }
  #endif        /* CONFIG_DYNAMIC_FTRACE */
index b0ded97ee4e11148cd436809aaab2c36c106893a,71ce4cbb7e9f268840369513ac411052a3d27d8b..6a014c763cc71da4a9a5fd523649f105bf5fdc10
@@@ -74,7 -74,6 +74,7 @@@ ftrace_modify_code(unsigned long ip, un
   */
  static int test_24bit_addr(unsigned long ip, unsigned long addr)
  {
 +      addr = ppc_function_entry((void *)addr);
  
        /* use the create_branch to verify that this offset can be branched */
        return create_branch((unsigned int *)ip, addr, 0);
@@@ -532,13 -531,8 +532,8 @@@ void arch_ftrace_update_code(int comman
                ftrace_disable_ftrace_graph_caller();
  }
  
- int __init ftrace_dyn_arch_init(void *data)
+ int __init ftrace_dyn_arch_init(void)
  {
-       /* caller expects data to be zero */
-       unsigned long *p = data;
-       *p = 0;
        return 0;
  }
  #endif /* CONFIG_DYNAMIC_FTRACE */
index 4cdb3a17bcb5932113020955cf8aa46b7b592f14,cdc975929d152f84ee8de342cdae64dc925a4657..cdc30111d2f8d0b1b16c832d4aeb481ea4e68e80
@@@ -163,6 -163,8 +163,8 @@@ void trace_current_buffer_discard_commi
  
  void tracing_record_cmdline(struct task_struct *tsk);
  
+ int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
  struct event_filter;
  
  enum trace_reg {
@@@ -197,6 -199,32 +199,32 @@@ struct ftrace_event_class 
  extern int ftrace_event_reg(struct ftrace_event_call *event,
                            enum trace_reg type, void *data);
  
+ int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
+                       char *fmt, ...);
+ int ftrace_event_define_field(struct ftrace_event_call *call,
+                             char *type, int len, char *item, int offset,
+                             int field_size, int sign, int filter);
+ struct ftrace_event_buffer {
+       struct ring_buffer              *buffer;
+       struct ring_buffer_event        *event;
+       struct ftrace_event_file        *ftrace_file;
+       void                            *entry;
+       unsigned long                   flags;
+       int                             pc;
+ };
+ void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
+                                 struct ftrace_event_file *ftrace_file,
+                                 unsigned long len);
+ void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
+ int ftrace_event_define_field(struct ftrace_event_call *call,
+                             char *type, int len, char *item, int offset,
+                             int field_size, int sign, int filter);
  enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
@@@ -495,6 -523,10 +523,6 @@@ enum 
        FILTER_TRACE_FN,
  };
  
 -#define EVENT_STORAGE_SIZE 128
 -extern struct mutex event_storage_mutex;
 -extern char event_storage[EVENT_STORAGE_SIZE];
 -
  extern int trace_event_raw_init(struct ftrace_event_call *call);
  extern int trace_define_field(struct ftrace_event_call *call, const char *type,
                              const char *name, int offset, int size,
index 7159a0a933df2b016db23cdcd87487c19d5829d0,a3b2837d8dd1f961faa81959c7a8b85bd8b6e959..812b2553dfd84c78d4c4eab4e580b554e0edf2ab
@@@ -48,38 -48,14 +48,20 @@@ extern int tracepoint_probe_register(co
  extern int
  tracepoint_probe_unregister(const char *name, void *probe, void *data);
  
- extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
-                                             void *data);
- extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
-                                               void *data);
- extern void tracepoint_probe_update_all(void);
  #ifdef CONFIG_MODULES
  struct tp_module {
        struct list_head list;
        unsigned int num_tracepoints;
        struct tracepoint * const *tracepoints_ptrs;
  };
 +bool trace_module_has_bad_taint(struct module *mod);
 +#else
 +static inline bool trace_module_has_bad_taint(struct module *mod)
 +{
 +      return false;
 +}
  #endif /* CONFIG_MODULES */
  
- struct tracepoint_iter {
- #ifdef CONFIG_MODULES
-       struct tp_module *module;
- #endif /* CONFIG_MODULES */
-       struct tracepoint * const *tracepoint;
- };
- extern void tracepoint_iter_start(struct tracepoint_iter *iter);
- extern void tracepoint_iter_next(struct tracepoint_iter *iter);
- extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
- extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
  /*
   * tracepoint_synchronize_unregister must be called between the last tracepoint
   * probe unregistration and the end of module exit to make sure there is no
index 464ea82e10dbf1f1a519b75c52f9e129a5a715e0,309a086e2a0bed9f4914571e12e835d14b5a1c3a..cee02d65ab3f1707080902de3375d828f1e0a526
@@@ -4,6 -4,7 +4,7 @@@
  #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
  #define _TRACE_WRITEBACK_H
  
+ #include <linux/tracepoint.h>
  #include <linux/backing-dev.h>
  #include <linux/writeback.h>
  
@@@ -287,11 -288,11 +288,11 @@@ TRACE_EVENT(writeback_queue_io
                __field(int,            reason)
        ),
        TP_fast_assign(
 -              unsigned long older_than_this = work->older_than_this;
 +              unsigned long *older_than_this = work->older_than_this;
                strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
 -              __entry->older  = older_than_this;
 +              __entry->older  = older_than_this ?  *older_than_this : 0;
                __entry->age    = older_than_this ?
 -                                (jiffies - older_than_this) * 1000 / HZ : -1;
 +                                (jiffies - *older_than_this) * 1000 / HZ : -1;
                __entry->moved  = moved;
                __entry->reason = work->reason;
        ),
diff --combined include/trace/ftrace.h
index 1ee19a24cc5f7170a3dab525767c669f3773dc7e,d1d91875faa52248b6629a76390ce3e9d039571b..8765126b328ce4220d3c61af8c655f0226f29564
@@@ -265,11 -265,9 +265,9 @@@ static notrace enum print_line_t                                  
  ftrace_raw_output_##call(struct trace_iterator *iter, int flags,      \
                         struct trace_event *event)                     \
  {                                                                     \
-       struct trace_seq *s = &iter->seq;                               \
        struct ftrace_raw_##template *field;                            \
        struct trace_entry *entry;                                      \
        struct trace_seq *p = &iter->tmp_seq;                           \
-       int ret;                                                        \
                                                                        \
        entry = iter->ent;                                              \
                                                                        \
        field = (typeof(field))entry;                                   \
                                                                        \
        trace_seq_init(p);                                              \
-       ret = trace_seq_printf(s, "%s: ", #call);                       \
-       if (ret)                                                        \
-               ret = trace_seq_printf(s, print);                       \
-       if (!ret)                                                       \
-               return TRACE_TYPE_PARTIAL_LINE;                         \
-                                                                       \
-       return TRACE_TYPE_HANDLED;                                      \
+       return ftrace_output_call(iter, #call, print);                  \
  }                                                                     \
  static struct trace_event_functions ftrace_event_type_funcs_##call = {        \
        .trace                  = ftrace_raw_output_##call,             \
  #undef __array
  #define __array(type, item, len)                                      \
        do {                                                            \
 -              mutex_lock(&event_storage_mutex);                       \
 +              char *type_str = #type"["__stringify(len)"]";           \
                BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                 \
 -              snprintf(event_storage, sizeof(event_storage),          \
 -                       "%s[%d]", #type, len);                         \
 -              ret = trace_define_field(event_call, event_storage, #item, \
 +              ret = trace_define_field(event_call, type_str, #item,   \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 is_signed_type(type), FILTER_OTHER);   \
 -              mutex_unlock(&event_storage_mutex);                     \
                if (ret)                                                \
                        return ret;                                     \
        } while (0);
@@@ -370,10 -365,11 +362,11 @@@ ftrace_define_fields_##call(struct ftra
  
  #undef __dynamic_array
  #define __dynamic_array(type, item, len)                              \
+       __item_length = (len) * sizeof(type);                           \
        __data_offsets->item = __data_size +                            \
                               offsetof(typeof(*entry), __data);        \
-       __data_offsets->item |= (len * sizeof(type)) << 16;             \
-       __data_size += (len) * sizeof(type);
+       __data_offsets->item |= __item_length << 16;                    \
+       __data_size += __item_length;
  
  #undef __string
  #define __string(item, src) __dynamic_array(char, item,                       \
@@@ -385,6 -381,7 +378,7 @@@ static inline notrace int ftrace_get_of
        struct ftrace_data_offsets_##call *__data_offsets, proto)       \
  {                                                                     \
        int __data_size = 0;                                            \
+       int __maybe_unused __item_length;                               \
        struct ftrace_raw_##call __maybe_unused *entry;                 \
                                                                        \
        tstruct;                                                        \
@@@ -541,37 -538,27 +535,27 @@@ static notrace void                                                     
  ftrace_raw_event_##call(void *__data, proto)                          \
  {                                                                     \
        struct ftrace_event_file *ftrace_file = __data;                 \
-       struct ftrace_event_call *event_call = ftrace_file->event_call; \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-       struct ring_buffer_event *event;                                \
+       struct ftrace_event_buffer fbuffer;                             \
        struct ftrace_raw_##call *entry;                                \
-       struct ring_buffer *buffer;                                     \
-       unsigned long irq_flags;                                        \
        int __data_size;                                                \
-       int pc;                                                         \
                                                                        \
        if (ftrace_trigger_soft_disabled(ftrace_file))                  \
                return;                                                 \
                                                                        \
-       local_save_flags(irq_flags);                                    \
-       pc = preempt_count();                                           \
-                                                                       \
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
                                                                        \
-       event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,   \
-                                event_call->event.type,                \
-                                sizeof(*entry) + __data_size,          \
-                                irq_flags, pc);                        \
-       if (!event)                                                     \
+       entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,      \
+                                sizeof(*entry) + __data_size);         \
+                                                                       \
+       if (!entry)                                                     \
                return;                                                 \
-       entry   = ring_buffer_event_data(event);                        \
                                                                        \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
                                                                        \
-       event_trigger_unlock_commit(ftrace_file, buffer, event, entry, \
-                                   irq_flags, pc);                    \
+       ftrace_event_buffer_commit(&fbuffer);                           \
  }
  /*
   * The ftrace_test_probe is compiled out, it is only here as a build time check
diff --combined kernel/trace/blktrace.c
index 4f3a3c03eadbf252545e9fc3259c34ff3f4da636,0d758ca619330ac5dc30395cb5f4695f80be9808..c1bd4ada2a044f9b8eb8971ef41e05b26790b31d
@@@ -702,7 -702,6 +702,7 @@@ void blk_trace_shutdown(struct request_
   * blk_add_trace_rq - Add a trace for a request oriented action
   * @q:                queue the io is for
   * @rq:               the source request
 + * @nr_bytes: number of completed bytes
   * @what:     the action
   *
   * Description:
   *
   **/
  static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 -                           u32 what)
 +                           unsigned int nr_bytes, u32 what)
  {
        struct blk_trace *bt = q->blk_trace;
  
  
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                what |= BLK_TC_ACT(BLK_TC_PC);
 -              __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
 +              __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
                                what, rq->errors, rq->cmd_len, rq->cmd);
        } else  {
                what |= BLK_TC_ACT(BLK_TC_FS);
 -              __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
 +              __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
                                rq->cmd_flags, what, rq->errors, 0, NULL);
        }
  }
  static void blk_add_trace_rq_abort(void *ignore,
                                   struct request_queue *q, struct request *rq)
  {
 -      blk_add_trace_rq(q, rq, BLK_TA_ABORT);
 +      blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
  }
  
  static void blk_add_trace_rq_insert(void *ignore,
                                    struct request_queue *q, struct request *rq)
  {
 -      blk_add_trace_rq(q, rq, BLK_TA_INSERT);
 +      blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
  }
  
  static void blk_add_trace_rq_issue(void *ignore,
                                   struct request_queue *q, struct request *rq)
  {
 -      blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 +      blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
  }
  
  static void blk_add_trace_rq_requeue(void *ignore,
                                     struct request_queue *q,
                                     struct request *rq)
  {
 -      blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 +      blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
  }
  
  static void blk_add_trace_rq_complete(void *ignore,
                                      struct request_queue *q,
 -                                    struct request *rq)
 +                                    struct request *rq,
 +                                    unsigned int nr_bytes)
  {
 -      blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
 +      blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
  }
  
  /**
@@@ -1429,7 -1427,8 +1429,8 @@@ static enum print_line_t blk_tracer_pri
        return print_one_line(iter, true);
  }
  
- static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
+ static int
+ blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  {
        /* don't output context-info for blk_classic output */
        if (bit == TRACE_BLK_OPT_CLASSIC) {
diff --combined kernel/trace/trace.c
index 24c1f23825579df4f2bf46e2dca98e6af8fb4abd,c90f55d80f86f2d132f87c418da34807e9e30df3..9be67c5e5b0f1eb78799db5e24c81cedda1053b3
@@@ -73,7 -73,8 +73,8 @@@ static struct tracer_flags dummy_tracer
        .opts = dummy_tracer_opt
  };
  
- static int dummy_set_flag(u32 old_flags, u32 bit, int set)
+ static int
+ dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  {
        return 0;
  }
@@@ -118,7 -119,7 +119,7 @@@ enum ftrace_dump_mode ftrace_dump_on_oo
  /* When set, tracing will stop when a WARN*() is hit */
  int __disable_trace_on_warning;
  
- static int tracing_set_tracer(const char *buf);
+ static int tracing_set_tracer(struct trace_array *tr, const char *buf);
  
  #define MAX_TRACER_SIZE               100
  static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@@ -180,6 -181,17 +181,17 @@@ static int __init set_trace_boot_option
  }
  __setup("trace_options=", set_trace_boot_options);
  
+ static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
+ static char *trace_boot_clock __initdata;
+ static int __init set_trace_boot_clock(char *str)
+ {
+       strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
+       trace_boot_clock = trace_boot_clock_buf;
+       return 0;
+ }
+ __setup("trace_clock=", set_trace_boot_clock);
  
  unsigned long long ns2usecs(cycle_t nsec)
  {
@@@ -1230,7 -1242,7 +1242,7 @@@ int register_tracer(struct tracer *type
  
        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
-       tracing_set_tracer(type->name);
+       tracing_set_tracer(&global_trace, type->name);
        default_bootup_tracer = NULL;
        /* disable other selftests, since this will break it. */
        tracing_selftest_disabled = true;
@@@ -1600,31 -1612,15 +1612,31 @@@ void trace_buffer_unlock_commit(struct 
  }
  EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
  
 +static struct ring_buffer *temp_buffer;
 +
  struct ring_buffer_event *
  trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
                          struct ftrace_event_file *ftrace_file,
                          int type, unsigned long len,
                          unsigned long flags, int pc)
  {
 +      struct ring_buffer_event *entry;
 +
        *current_rb = ftrace_file->tr->trace_buffer.buffer;
 -      return trace_buffer_lock_reserve(*current_rb,
 +      entry = trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
 +      /*
 +       * If tracing is off, but we have triggers enabled
 +       * we still need to look at the event data. Use the temp_buffer
 +       * to store the trace event for the trigger to use. It's recursive
 +       * safe and will not be recorded anywhere.
 +       */
 +      if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
 +              *current_rb = temp_buffer;
 +              entry = trace_buffer_lock_reserve(*current_rb,
 +                                                type, len, flags, pc);
 +      }
 +      return entry;
  }
  EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
  
@@@ -3137,27 -3133,52 +3149,52 @@@ static int tracing_open(struct inode *i
        return ret;
  }
  
+ /*
+  * Some tracers are not suitable for instance buffers.
+  * A tracer is always available for the global array (toplevel)
+  * or if it explicitly states that it is.
+  */
+ static bool
+ trace_ok_for_array(struct tracer *t, struct trace_array *tr)
+ {
+       return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
+ }
+ /* Find the next tracer that this trace array may use */
+ static struct tracer *
+ get_tracer_for_array(struct trace_array *tr, struct tracer *t)
+ {
+       while (t && !trace_ok_for_array(t, tr))
+               t = t->next;
+       return t;
+ }
  static void *
  t_next(struct seq_file *m, void *v, loff_t *pos)
  {
+       struct trace_array *tr = m->private;
        struct tracer *t = v;
  
        (*pos)++;
  
        if (t)
-               t = t->next;
+               t = get_tracer_for_array(tr, t->next);
  
        return t;
  }
  
  static void *t_start(struct seq_file *m, loff_t *pos)
  {
+       struct trace_array *tr = m->private;
        struct tracer *t;
        loff_t l = 0;
  
        mutex_lock(&trace_types_lock);
-       for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
-               ;
+       t = get_tracer_for_array(tr, trace_types);
+       for (; t && l < *pos; t = t_next(m, t, &l))
+                       ;
  
        return t;
  }
@@@ -3192,10 -3213,21 +3229,21 @@@ static const struct seq_operations show
  
  static int show_traces_open(struct inode *inode, struct file *file)
  {
+       struct trace_array *tr = inode->i_private;
+       struct seq_file *m;
+       int ret;
        if (tracing_disabled)
                return -ENODEV;
  
-       return seq_open(file, &show_traces_seq_ops);
+       ret = seq_open(file, &show_traces_seq_ops);
+       if (ret)
+               return ret;
+       m = file->private_data;
+       m->private = tr;
+       return 0;
  }
  
  static ssize_t
@@@ -3355,13 -3387,14 +3403,14 @@@ static int tracing_trace_options_show(s
        return 0;
  }
  
- static int __set_tracer_option(struct tracer *trace,
+ static int __set_tracer_option(struct trace_array *tr,
                               struct tracer_flags *tracer_flags,
                               struct tracer_opt *opts, int neg)
  {
+       struct tracer *trace = tr->current_trace;
        int ret;
  
-       ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+       ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
        if (ret)
                return ret;
  
  }
  
  /* Try to assign a tracer specific option */
- static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+ static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
  {
+       struct tracer *trace = tr->current_trace;
        struct tracer_flags *tracer_flags = trace->flags;
        struct tracer_opt *opts = NULL;
        int i;
                opts = &tracer_flags->opts[i];
  
                if (strcmp(cmp, opts->name) == 0)
-                       return __set_tracer_option(trace, trace->flags,
-                                                  opts, neg);
+                       return __set_tracer_option(tr, trace->flags, opts, neg);
        }
  
        return -EINVAL;
@@@ -3407,7 -3440,7 +3456,7 @@@ int set_tracer_flag(struct trace_array 
  
        /* Give the tracer a chance to approve the change */
        if (tr->current_trace->flag_changed)
-               if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
+               if (tr->current_trace->flag_changed(tr, mask, !!enabled))
                        return -EINVAL;
  
        if (enabled)
@@@ -3456,7 -3489,7 +3505,7 @@@ static int trace_set_options(struct tra
  
        /* If no option could be set, test the specific tracer options */
        if (!trace_options[i])
-               ret = set_tracer_option(tr->current_trace, cmp, neg);
+               ret = set_tracer_option(tr, cmp, neg);
  
        mutex_unlock(&trace_types_lock);
  
@@@ -3885,10 -3918,26 +3934,26 @@@ create_trace_option_files(struct trace_
  static void
  destroy_trace_option_files(struct trace_option_dentry *topts);
  
- static int tracing_set_tracer(const char *buf)
+ /*
+  * Used to clear out the tracer before deletion of an instance.
+  * Must have trace_types_lock held.
+  */
+ static void tracing_set_nop(struct trace_array *tr)
+ {
+       if (tr->current_trace == &nop_trace)
+               return;
+       
+       tr->current_trace->enabled--;
+       if (tr->current_trace->reset)
+               tr->current_trace->reset(tr);
+       tr->current_trace = &nop_trace;
+ }
+ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
  {
        static struct trace_option_dentry *topts;
-       struct trace_array *tr = &global_trace;
        struct tracer *t;
  #ifdef CONFIG_TRACER_MAX_TRACE
        bool had_max_tr;
        if (t == tr->current_trace)
                goto out;
  
+       /* Some tracers are only allowed for the top level buffer */
+       if (!trace_ok_for_array(t, tr)) {
+               ret = -EINVAL;
+               goto out;
+       }
        trace_branch_disable();
  
-       tr->current_trace->enabled = false;
+       tr->current_trace->enabled--;
  
        if (tr->current_trace->reset)
                tr->current_trace->reset(tr);
                free_snapshot(tr);
        }
  #endif
-       destroy_trace_option_files(topts);
-       topts = create_trace_option_files(tr, t);
+       /* Currently, only the top instance has options */
+       if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+               destroy_trace_option_files(topts);
+               topts = create_trace_option_files(tr, t);
+       }
  
  #ifdef CONFIG_TRACER_MAX_TRACE
        if (t->use_max_tr && !had_max_tr) {
        }
  
        tr->current_trace = t;
-       tr->current_trace->enabled = true;
+       tr->current_trace->enabled++;
        trace_branch_enable(tr);
   out:
        mutex_unlock(&trace_types_lock);
@@@ -3972,6 -4029,7 +4045,7 @@@ static ssize_
  tracing_set_trace_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
  {
+       struct trace_array *tr = filp->private_data;
        char buf[MAX_TRACER_SIZE+1];
        int i;
        size_t ret;
        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
                buf[i] = 0;
  
-       err = tracing_set_tracer(buf);
+       err = tracing_set_tracer(tr, buf);
        if (err)
                return err;
  
@@@ -4699,25 -4757,10 +4773,10 @@@ static int tracing_clock_show(struct se
        return 0;
  }
  
- static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
-                                  size_t cnt, loff_t *fpos)
+ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
  {
-       struct seq_file *m = filp->private_data;
-       struct trace_array *tr = m->private;
-       char buf[64];
-       const char *clockstr;
        int i;
  
-       if (cnt >= sizeof(buf))
-               return -EINVAL;
-       if (copy_from_user(&buf, ubuf, cnt))
-               return -EFAULT;
-       buf[cnt] = 0;
-       clockstr = strstrip(buf);
        for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
                if (strcmp(trace_clocks[i].name, clockstr) == 0)
                        break;
  
        mutex_unlock(&trace_types_lock);
  
+       return 0;
+ }
+ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
+                                  size_t cnt, loff_t *fpos)
+ {
+       struct seq_file *m = filp->private_data;
+       struct trace_array *tr = m->private;
+       char buf[64];
+       const char *clockstr;
+       int ret;
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+       buf[cnt] = 0;
+       clockstr = strstrip(buf);
+       ret = tracing_set_clock(tr, clockstr);
+       if (ret)
+               return ret;
        *fpos += cnt;
  
        return cnt;
@@@ -5705,7 -5774,7 +5790,7 @@@ trace_options_write(struct file *filp, 
  
        if (!!(topt->flags->val & topt->opt->bit) != val) {
                mutex_lock(&trace_types_lock);
-               ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
+               ret = __set_tracer_option(topt->tr, topt->flags,
                                          topt->opt, !val);
                mutex_unlock(&trace_types_lock);
                if (ret)
@@@ -6112,7 -6181,9 +6197,9 @@@ static int instance_delete(const char *
  
        list_del(&tr->list);
  
+       tracing_set_nop(tr);
        event_trace_del_tracer(tr);
+       ftrace_destroy_function_files(tr);
        debugfs_remove_recursive(tr->dir);
        free_percpu(tr->trace_buffer.data);
        ring_buffer_free(tr->trace_buffer.buffer);
@@@ -6207,6 -6278,12 +6294,12 @@@ init_tracer_debugfs(struct trace_array 
  {
        int cpu;
  
+       trace_create_file("available_tracers", 0444, d_tracer,
+                       tr, &show_traces_fops);
+       trace_create_file("current_tracer", 0644, d_tracer,
+                       tr, &set_tracer_fops);
        trace_create_file("tracing_cpumask", 0644, d_tracer,
                          tr, &tracing_cpumask_fops);
  
        trace_create_file("tracing_on", 0644, d_tracer,
                          tr, &rb_simple_fops);
  
+       if (ftrace_create_function_files(tr, d_tracer))
+               WARN(1, "Could not allocate function filter files");
  #ifdef CONFIG_TRACER_SNAPSHOT
        trace_create_file("snapshot", 0644, d_tracer,
                          tr, &snapshot_fops);
@@@ -6259,12 -6339,6 +6355,6 @@@ static __init int tracer_init_debugfs(v
  
        init_tracer_debugfs(&global_trace, d_tracer);
  
-       trace_create_file("available_tracers", 0444, d_tracer,
-                       &global_trace, &show_traces_fops);
-       trace_create_file("current_tracer", 0644, d_tracer,
-                       &global_trace, &set_tracer_fops);
  #ifdef CONFIG_TRACER_MAX_TRACE
        trace_create_file("tracing_max_latency", 0644, d_tracer,
                        &tracing_max_latency, &tracing_max_lat_fops);
@@@ -6510,16 -6584,11 +6600,16 @@@ __init static int tracer_alloc_buffers(
  
        raw_spin_lock_init(&global_trace.start_lock);
  
 +      /* Used for event triggers */
 +      temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
 +      if (!temp_buffer)
 +              goto out_free_cpumask;
 +
        /* TODO: make the number of buffers hot pluggable with CPUS */
        if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
 -              goto out_free_cpumask;
 +              goto out_free_temp_buffer;
        }
  
        if (global_trace.buffer_disabled)
  
        trace_init_cmdlines();
  
+       if (trace_boot_clock) {
+               ret = tracing_set_clock(&global_trace, trace_boot_clock);
+               if (ret < 0)
+                       pr_warning("Trace clock %s not defined, going back to default\n",
+                                  trace_boot_clock);
+       }
        /*
         * register_tracer() might reference current_trace, so it
         * needs to be set before we register anything. This is
  
        return 0;
  
 +out_free_temp_buffer:
 +      ring_buffer_free(temp_buffer);
  out_free_cpumask:
        free_percpu(global_trace.trace_buffer.data);
  #ifdef CONFIG_TRACER_MAX_TRACE
index 7b16d40bd64d934d2be40e146bf8baa8e074487d,2f7b8e31e3a4c21930e4231544accd56954d2623..83a4378dc5e00b91eebef35ed5e05898ff72295e
  
  DEFINE_MUTEX(event_mutex);
  
 -DEFINE_MUTEX(event_storage_mutex);
 -EXPORT_SYMBOL_GPL(event_storage_mutex);
 -
 -char event_storage[EVENT_STORAGE_SIZE];
 -EXPORT_SYMBOL_GPL(event_storage);
 -
  LIST_HEAD(ftrace_events);
  static LIST_HEAD(ftrace_common_fields);
  
@@@ -188,6 -194,36 +188,36 @@@ int trace_event_raw_init(struct ftrace_
  }
  EXPORT_SYMBOL_GPL(trace_event_raw_init);
  
+ void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
+                                 struct ftrace_event_file *ftrace_file,
+                                 unsigned long len)
+ {
+       struct ftrace_event_call *event_call = ftrace_file->event_call;
+       local_save_flags(fbuffer->flags);
+       fbuffer->pc = preempt_count();
+       fbuffer->ftrace_file = ftrace_file;
+       fbuffer->event =
+               trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
+                                               event_call->event.type, len,
+                                               fbuffer->flags, fbuffer->pc);
+       if (!fbuffer->event)
+               return NULL;
+       fbuffer->entry = ring_buffer_event_data(fbuffer->event);
+       return fbuffer->entry;
+ }
+ EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
+ void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
+ {
+       event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
+                                   fbuffer->event, fbuffer->entry,
+                                   fbuffer->flags, fbuffer->pc);
+ }
+ EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
  int ftrace_event_reg(struct ftrace_event_call *call,
                     enum trace_reg type, void *data)
  {
@@@ -1771,16 -1807,6 +1801,16 @@@ static void trace_module_add_events(str
  {
        struct ftrace_event_call **call, **start, **end;
  
 +      if (!mod->num_trace_events)
 +              return;
 +
 +      /* Don't add infrastructure for mods without tracepoints */
 +      if (trace_module_has_bad_taint(mod)) {
 +              pr_err("%s: module has bad taint, not creating trace events\n",
 +                     mod->name);
 +              return;
 +      }
 +
        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;
  
index 887ef88b0bc70e10463a37da502e1718385e35ca,4bf812f454e686a29041ef860f51a6a2beee675b..8ff02cbb892fb4242a6c5a6d5c71fe07bdef878f
@@@ -160,7 -160,8 +160,8 @@@ static struct ftrace_ops trace_ops __re
  #endif /* CONFIG_FUNCTION_TRACER */
  
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+ static int
+ irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  {
        int cpu;
  
@@@ -266,7 -267,8 +267,8 @@@ __trace_function(struct trace_array *tr
  #else
  #define __trace_function trace_function
  
- static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+ static int
+ irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  {
        return -EINVAL;
  }
@@@ -498,14 -500,14 +500,14 @@@ void trace_hardirqs_off(void
  }
  EXPORT_SYMBOL(trace_hardirqs_off);
  
 -void trace_hardirqs_on_caller(unsigned long caller_addr)
 +__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
  {
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
  }
  EXPORT_SYMBOL(trace_hardirqs_on_caller);
  
 -void trace_hardirqs_off_caller(unsigned long caller_addr)
 +__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
  {
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
@@@ -570,8 -572,10 +572,10 @@@ static void irqsoff_function_set(int se
                unregister_irqsoff_function(is_graph());
  }
  
- static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set)
+ static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
  {
+       struct tracer *tracer = tr->current_trace;
        if (mask & TRACE_ITER_FUNCTION)
                irqsoff_function_set(set);
  
diff --combined kernel/tracepoint.c
index 031cc5655a514d2bcf89f930388dcddda396986f,65d9f9459a7576ed09da22df02730b5ea190c987..50f8329c20425decc51420f5d733b15193a56091
@@@ -62,14 -62,12 +62,12 @@@ struct tracepoint_entry 
        struct hlist_node hlist;
        struct tracepoint_func *funcs;
        int refcount;   /* Number of times armed. 0 if disarmed. */
+       int enabled;    /* Tracepoint enabled */
        char name[0];
  };
  
  struct tp_probes {
-       union {
-               struct rcu_head rcu;
-               struct list_head list;
-       } u;
+       struct rcu_head rcu;
        struct tracepoint_func probes[0];
  };
  
@@@ -82,7 -80,7 +80,7 @@@ static inline void *allocate_probes(in
  
  static void rcu_free_old_probes(struct rcu_head *head)
  {
-       kfree(container_of(head, struct tp_probes, u.rcu));
+       kfree(container_of(head, struct tp_probes, rcu));
  }
  
  static inline void release_probes(struct tracepoint_func *old)
@@@ -90,7 -88,7 +88,7 @@@
        if (old) {
                struct tp_probes *tp_probes = container_of(old,
                        struct tp_probes, probes[0]);
-               call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
+               call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
        }
  }
  
@@@ -237,6 -235,7 +235,7 @@@ static struct tracepoint_entry *add_tra
        memcpy(&e->name[0], name, name_len);
        e->funcs = NULL;
        e->refcount = 0;
+       e->enabled = 0;
        hlist_add_head(&e->hlist, head);
        return e;
  }
@@@ -316,6 -315,7 +315,7 @@@ static void tracepoint_update_probe_ran
                if (mark_entry) {
                        set_tracepoint(&mark_entry, *iter,
                                        !!mark_entry->refcount);
+                       mark_entry->enabled = !!mark_entry->refcount;
                } else {
                        disable_tracepoint(*iter);
                }
@@@ -373,13 -373,26 +373,26 @@@ tracepoint_add_probe(const char *name, 
   * tracepoint_probe_register -  Connect a probe to a tracepoint
   * @name: tracepoint name
   * @probe: probe handler
+  * @data: probe private data
+  *
+  * Returns:
+  * - 0 if the probe was successfully registered, and tracepoint
+  *   callsites are currently loaded for that probe,
+  * - -ENODEV if the probe was successfully registered, but no tracepoint
+  *   callsite is currently loaded for that probe,
+  * - other negative error value on error.
+  *
+  * When tracepoint_probe_register() returns either 0 or -ENODEV,
+  * parameters @name, @probe, and @data may be used by the tracepoint
+  * infrastructure until the probe is unregistered.
   *
-  * Returns 0 if ok, error value on error.
   * The probe address must at least be aligned on the architecture pointer size.
   */
  int tracepoint_probe_register(const char *name, void *probe, void *data)
  {
        struct tracepoint_func *old;
+       struct tracepoint_entry *entry;
+       int ret = 0;
  
        mutex_lock(&tracepoints_mutex);
        old = tracepoint_add_probe(name, probe, data);
                return PTR_ERR(old);
        }
        tracepoint_update_probes();             /* may update entry */
+       entry = get_tracepoint(name);
+       /* Make sure the entry was enabled */
+       if (!entry || !entry->enabled)
+               ret = -ENODEV;
        mutex_unlock(&tracepoints_mutex);
        release_probes(old);
-       return 0;
+       return ret;
  }
  EXPORT_SYMBOL_GPL(tracepoint_probe_register);
  
@@@ -415,6 -432,7 +432,7 @@@ tracepoint_remove_probe(const char *nam
   * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
   * @name: tracepoint name
   * @probe: probe function pointer
+  * @data: probe private data
   *
   * We do not need to call a synchronize_sched to make sure the probes have
   * finished running before doing a module unload, because the module unload
@@@ -438,215 -456,22 +456,27 @@@ int tracepoint_probe_unregister(const c
  }
  EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
  
- static LIST_HEAD(old_probes);
- static int need_update;
- static void tracepoint_add_old_probes(void *old)
- {
-       need_update = 1;
-       if (old) {
-               struct tp_probes *tp_probes = container_of(old,
-                       struct tp_probes, probes[0]);
-               list_add(&tp_probes->u.list, &old_probes);
-       }
- }
- /**
-  * tracepoint_probe_register_noupdate -  register a probe but not connect
-  * @name: tracepoint name
-  * @probe: probe handler
-  *
-  * caller must call tracepoint_probe_update_all()
-  */
- int tracepoint_probe_register_noupdate(const char *name, void *probe,
-                                      void *data)
- {
-       struct tracepoint_func *old;
-       mutex_lock(&tracepoints_mutex);
-       old = tracepoint_add_probe(name, probe, data);
-       if (IS_ERR(old)) {
-               mutex_unlock(&tracepoints_mutex);
-               return PTR_ERR(old);
-       }
-       tracepoint_add_old_probes(old);
-       mutex_unlock(&tracepoints_mutex);
-       return 0;
- }
- EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
- /**
-  * tracepoint_probe_unregister_noupdate -  remove a probe but not disconnect
-  * @name: tracepoint name
-  * @probe: probe function pointer
-  *
-  * caller must call tracepoint_probe_update_all()
-  */
- int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
-                                        void *data)
- {
-       struct tracepoint_func *old;
-       mutex_lock(&tracepoints_mutex);
-       old = tracepoint_remove_probe(name, probe, data);
-       if (IS_ERR(old)) {
-               mutex_unlock(&tracepoints_mutex);
-               return PTR_ERR(old);
-       }
-       tracepoint_add_old_probes(old);
-       mutex_unlock(&tracepoints_mutex);
-       return 0;
- }
- EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
- /**
-  * tracepoint_probe_update_all -  update tracepoints
-  */
- void tracepoint_probe_update_all(void)
- {
-       LIST_HEAD(release_probes);
-       struct tp_probes *pos, *next;
-       mutex_lock(&tracepoints_mutex);
-       if (!need_update) {
-               mutex_unlock(&tracepoints_mutex);
-               return;
-       }
-       if (!list_empty(&old_probes))
-               list_replace_init(&old_probes, &release_probes);
-       need_update = 0;
-       tracepoint_update_probes();
-       mutex_unlock(&tracepoints_mutex);
-       list_for_each_entry_safe(pos, next, &release_probes, u.list) {
-               list_del(&pos->u.list);
-               call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
-       }
- }
- EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
- /**
-  * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
-  * @tracepoint: current tracepoints (in), next tracepoint (out)
-  * @begin: beginning of the range
-  * @end: end of the range
-  *
-  * Returns whether a next tracepoint has been found (1) or not (0).
-  * Will return the first tracepoint in the range if the input tracepoint is
-  * NULL.
-  */
- static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
-       struct tracepoint * const *begin, struct tracepoint * const *end)
- {
-       if (!*tracepoint && begin != end) {
-               *tracepoint = begin;
-               return 1;
-       }
-       if (*tracepoint >= begin && *tracepoint < end)
-               return 1;
-       return 0;
- }
- #ifdef CONFIG_MODULES
- static void tracepoint_get_iter(struct tracepoint_iter *iter)
- {
-       int found = 0;
-       struct tp_module *iter_mod;
-       /* Core kernel tracepoints */
-       if (!iter->module) {
-               found = tracepoint_get_iter_range(&iter->tracepoint,
-                               __start___tracepoints_ptrs,
-                               __stop___tracepoints_ptrs);
-               if (found)
-                       goto end;
-       }
-       /* Tracepoints in modules */
-       mutex_lock(&tracepoints_mutex);
-       list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
-               /*
-                * Sorted module list
-                */
-               if (iter_mod < iter->module)
-                       continue;
-               else if (iter_mod > iter->module)
-                       iter->tracepoint = NULL;
-               found = tracepoint_get_iter_range(&iter->tracepoint,
-                       iter_mod->tracepoints_ptrs,
-                       iter_mod->tracepoints_ptrs
-                               + iter_mod->num_tracepoints);
-               if (found) {
-                       iter->module = iter_mod;
-                       break;
-               }
-       }
-       mutex_unlock(&tracepoints_mutex);
- end:
-       if (!found)
-               tracepoint_iter_reset(iter);
- }
- #else /* CONFIG_MODULES */
- static void tracepoint_get_iter(struct tracepoint_iter *iter)
- {
-       int found = 0;
-       /* Core kernel tracepoints */
-       found = tracepoint_get_iter_range(&iter->tracepoint,
-                       __start___tracepoints_ptrs,
-                       __stop___tracepoints_ptrs);
-       if (!found)
-               tracepoint_iter_reset(iter);
- }
- #endif /* CONFIG_MODULES */
- void tracepoint_iter_start(struct tracepoint_iter *iter)
- {
-       tracepoint_get_iter(iter);
- }
- EXPORT_SYMBOL_GPL(tracepoint_iter_start);
- void tracepoint_iter_next(struct tracepoint_iter *iter)
- {
-       iter->tracepoint++;
-       /*
-        * iter->tracepoint may be invalid because we blindly incremented it.
-        * Make sure it is valid by marshalling on the tracepoints, getting the
-        * tracepoints from following modules if necessary.
-        */
-       tracepoint_get_iter(iter);
- }
- EXPORT_SYMBOL_GPL(tracepoint_iter_next);
- void tracepoint_iter_stop(struct tracepoint_iter *iter)
- {
- }
- EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
- void tracepoint_iter_reset(struct tracepoint_iter *iter)
- {
- #ifdef CONFIG_MODULES
-       iter->module = NULL;
- #endif /* CONFIG_MODULES */
-       iter->tracepoint = NULL;
- }
- EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
  
  #ifdef CONFIG_MODULES
 +bool trace_module_has_bad_taint(struct module *mod)
 +{
 +      return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
 +}
 +
  static int tracepoint_module_coming(struct module *mod)
  {
-       struct tp_module *tp_mod, *iter;
+       struct tp_module *tp_mod;
        int ret = 0;
  
+       if (!mod->num_tracepoints)
+               return 0;
        /*
         * We skip modules that taint the kernel, especially those with different
         * module headers (for forced load), to make sure we don't cause a crash.
         * Staging and out-of-tree GPL modules are fine.
         */
 -      if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
 +      if (trace_module_has_bad_taint(mod))
                return 0;
        mutex_lock(&tracepoints_mutex);
        tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
        }
        tp_mod->num_tracepoints = mod->num_tracepoints;
        tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
-       /*
-        * tracepoint_module_list is kept sorted by struct module pointer
-        * address for iteration on tracepoints from a seq_file that can release
-        * the mutex between calls.
-        */
-       list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
-               BUG_ON(iter == tp_mod); /* Should never be in the list twice */
-               if (iter < tp_mod) {
-                       /* We belong to the location right after iter. */
-                       list_add(&tp_mod->list, &iter->list);
-                       goto module_added;
-               }
-       }
-       /* We belong to the beginning of the list */
-       list_add(&tp_mod->list, &tracepoint_module_list);
- module_added:
+       list_add_tail(&tp_mod->list, &tracepoint_module_list);
        tracepoint_update_probe_range(mod->tracepoints_ptrs,
                mod->tracepoints_ptrs + mod->num_tracepoints);
  end:
@@@ -684,6 -493,9 +498,9 @@@ static int tracepoint_module_going(stru
  {
        struct tp_module *pos;
  
+       if (!mod->num_tracepoints)
+               return 0;
        mutex_lock(&tracepoints_mutex);
        tracepoint_update_probe_range(mod->tracepoints_ptrs,
                mod->tracepoints_ptrs + mod->num_tracepoints);