git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge branch 'linus' into tracing/core
author: Ingo Molnar <mingo@elte.hu>
Mon, 13 Apr 2009 22:02:16 +0000 (00:02 +0200)
committer: Ingo Molnar <mingo@elte.hu>
Mon, 13 Apr 2009 22:02:22 +0000 (00:02 +0200)
Merge reason: merge latest tracing fixes to avoid conflicts in
              kernel/trace/trace_events_filter.c with upcoming change

Signed-off-by: Ingo Molnar <mingo@elte.hu>
1  2 
kernel/trace/Kconfig
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_events_stage_2.h

diff --combined kernel/trace/Kconfig
index 644606e899fa0679409b9f26018fc96b47af7790,417d1985e29911784adbf506523462f009933c02..57981d338d1fd1416f941ce558d4ee946dd87ee9
@@@ -48,9 -48,6 +48,9 @@@ config FTRACE_NMI_ENTE
         depends on HAVE_FTRACE_NMI_ENTER
         default y
  
 +config EVENT_TRACING
 +      bool
 +
  config TRACING
        bool
        select DEBUG_FS
@@@ -59,7 -56,6 +59,7 @@@
        select TRACEPOINTS
        select NOP_TRACER
        select BINARY_PRINTF
 +      select EVENT_TRACING
  
  #
  # Minimum requirements an architecture has to meet for us to
@@@ -108,7 -104,6 +108,7 @@@ config FUNCTION_GRAPH_TRACE
          the return value. This is done by setting the current return 
          address on the current task structure into a stack of calls.
  
 +
  config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
        default n
@@@ -317,7 -312,7 +317,7 @@@ config KMEMTRAC
          and profile kernel code.
  
          This requires an userspace application to use. See
-         Documentation/vm/kmemtrace.txt for more information.
+         Documentation/trace/kmemtrace.txt for more information.
  
          Saying Y will make the kernel somewhat larger and slower. However,
          if you disable kmemtrace at run-time or boot-time, the performance
@@@ -380,20 -375,6 +380,20 @@@ config DYNAMIC_FTRAC
         were made. If so, it runs stop_machine (stops all CPUS)
         and modifies the code to jump over the call to ftrace.
  
 +config FUNCTION_PROFILER
 +      bool "Kernel function profiler"
 +      depends on FUNCTION_TRACER
 +      default n
 +      help
 +       This option enables the kernel function profiler. A file is created
 +       in debugfs called function_profile_enabled which defaults to zero.
 +       When a 1 is echoed into this file profiling begins, and when a
 +       zero is entered, profiling stops. A file in the trace_stats
 +       directory called functions, that show the list of functions that
 +       have been hit and their counters.
 +
 +       If in doubt, say N
 +
  config FTRACE_MCOUNT_RECORD
        def_bool y
        depends on DYNAMIC_FTRACE
@@@ -422,7 -403,7 +422,7 @@@ config MMIOTRAC
          implementation and works via page faults. Tracing is disabled by
          default and can be enabled at run-time.
  
-         See Documentation/tracers/mmiotrace.txt.
+         See Documentation/trace/mmiotrace.txt.
          If you are not helping to develop drivers, say N.
  
  config MMIOTRACE_TEST
index be9299a53e2a8a783d56478f6e16b18431575b88,576f4fa2af0da22cad87d1c20fb044d89c16a066..789e14eb09a5c2f60e045cb472109583171d013f
@@@ -503,6 -503,7 +503,7 @@@ event_filter_write(struct file *filp, c
  
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
+       buf[cnt] = '\0';
  
        pred = kzalloc(sizeof(*pred), GFP_KERNEL);
        if (!pred)
                return cnt;
        }
  
-       if (filter_add_pred(call, pred)) {
+       err = filter_add_pred(call, pred);
+       if (err < 0) {
                filter_free_pred(pred);
-               return -EINVAL;
+               return err;
        }
  
        *ppos += cnt;
@@@ -569,6 -571,7 +571,7 @@@ subsystem_filter_write(struct file *fil
  
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
+       buf[cnt] = '\0';
  
        pred = kzalloc(sizeof(*pred), GFP_KERNEL);
        if (!pred)
                return cnt;
        }
  
-       if (filter_add_subsystem_pred(system, pred)) {
+       err = filter_add_subsystem_pred(system, pred);
+       if (err < 0) {
                filter_free_subsystem_preds(system);
                filter_free_pred(pred);
-               return -EINVAL;
+               return err;
        }
  
        *ppos += cnt;
@@@ -680,7 -684,6 +684,7 @@@ static struct dentry 
  event_subsystem_dir(const char *name, struct dentry *d_events)
  {
        struct event_subsystem *system;
 +      struct dentry *entry;
  
        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
  
        system->preds = NULL;
  
 +      entry = debugfs_create_file("filter", 0644, system->entry, system,
 +                                  &ftrace_subsystem_filter_fops);
 +      if (!entry)
 +              pr_warning("Could not create debugfs "
 +                         "'%s/filter' entry\n", name);
 +
        return system->entry;
  }
  
index 470ad9487eccc71fab90e1f640dfa2cf3b7aa355,e03cbf1e38f36b306f8eafd2ed2d79837b901a37..9f8ecca34a5950d98b3c86b8636ade46ef2cf457
@@@ -185,7 -185,7 +185,7 @@@ void filter_free_subsystem_preds(struc
        }
  
        events_for_each(call) {
 -              if (!call->name || !call->regfunc)
 +              if (!call->define_fields)
                        continue;
  
                if (!strcmp(call->system, system->name))
@@@ -215,7 -215,7 +215,7 @@@ static int __filter_add_pred(struct ftr
                }
        }
  
-       return -ENOMEM;
+       return -ENOSPC;
  }
  
  static int is_string_field(const char *type)
@@@ -319,12 -319,12 +319,12 @@@ int filter_add_subsystem_pred(struct ev
        }
  
        if (i == MAX_FILTER_PRED)
-               return -EINVAL;
+               return -ENOSPC;
  
        events_for_each(call) {
                int err;
  
 -              if (!call->name || !call->regfunc)
 +              if (!call->define_fields)
                        continue;
  
                if (strcmp(call->system, system->name))
@@@ -410,16 -410,22 +410,22 @@@ int filter_parse(char **pbuf, struct fi
                }
        }
  
+       if (!val_str) {
+               pred->field_name = NULL;
+               return -EINVAL;
+       }
        pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
        if (!pred->field_name)
                return -ENOMEM;
  
-       pred->val = simple_strtoull(val_str, &tmp, 10);
+       pred->val = simple_strtoull(val_str, &tmp, 0);
        if (tmp == val_str) {
                pred->str_val = kstrdup(val_str, GFP_KERNEL);
                if (!pred->str_val)
                        return -ENOMEM;
-       }
+       } else if (*tmp != '\0')
+               return -EINVAL;
  
        return 0;
  }
index 1c94b87c7180d0865600dbdb2ef1682a355f6a69,d363c6672c6c6aa85af3b40066af8e02d296c1b5..02fb710193ed4b74c94c0fd4d4dfbba589ebaa3f
@@@ -105,10 -105,10 +105,10 @@@ ftrace_raw_output_##call(struct trace_i
                return 0;
  
  #undef __entry
- #define __entry "REC"
+ #define __entry REC
  
  #undef TP_printk
- #define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args
+ #define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
  
  #undef TP_fast_assign
  #define TP_fast_assign(args...) args
@@@ -146,6 -146,13 +146,6 @@@ ftrace_format_##call(struct trace_seq *
        if (ret)                                                        \
                return ret;
  
 -#define __common_field(type, item)                                    \
 -      ret = trace_define_field(event_call, #type, "common_" #item,    \
 -                               offsetof(typeof(field.ent), item),     \
 -                               sizeof(field.ent.item));               \
 -      if (ret)                                                        \
 -              return ret;
 -
  #undef TRACE_EVENT
  #define TRACE_EVENT(call, proto, args, tstruct, func, print)          \
  int                                                                   \