Merge remote-tracking branch 'ftrace/for-next'
author Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 5 Nov 2015 02:50:20 +0000 (13:50 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 5 Nov 2015 02:50:23 +0000 (13:50 +1100)
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_stack.c

Simple merge
Simple merge
index 7ca09cdc20c2f920faa004e32eb248e3bc92bf61,bee1e153005278b63676622207b4f606e2c84f72..6bbc5f652355745d24f6252a93d7b437a0efea15
@@@ -446,6 -469,142 +469,142 @@@ static void ftrace_clear_events(struct 
        mutex_unlock(&event_mutex);
  }
  
 -event_filter_pid_sched_switch_probe_pre(void *data,
+ static int cmp_pid(const void *key, const void *elt)
+ {
+       const pid_t *search_pid = key;
+       const pid_t *pid = elt;
+       if (*search_pid == *pid)
+               return 0;
+       if (*search_pid < *pid)
+               return -1;
+       return 1;
+ }
+ 
+ static bool
+ check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
+ {
+       pid_t search_pid;
+       pid_t *pid;
+       /*
+        * Return false, because if filtered_pids does not exist,
+        * all pids are good to trace.
+        */
+       if (!filtered_pids)
+               return false;
+       search_pid = task->pid;
+       pid = bsearch(&search_pid, filtered_pids->pids,
+                     filtered_pids->nr_pids, sizeof(pid_t),
+                     cmp_pid);
+       if (!pid)
+               return true;
+       return false;
+ }
+ 
+ static void
 -event_filter_pid_sched_switch_probe_post(void *data,
++event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
+                   struct task_struct *prev, struct task_struct *next)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, prev) &&
+                      check_ignore_pid(pid_list, next));
+ }
+ 
+ static void
++event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
+                   struct task_struct *prev, struct task_struct *next)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, next));
+ }
+ 
+ static void
+ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       /* Nothing to do if we are already tracing */
+       if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+               return;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, task));
+ }
+ 
+ static void
+ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       /* Nothing to do if we are not tracing */
+       if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+               return;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       /* Set tracing if current is enabled */
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, current));
+ }
+ 
+ static void __ftrace_clear_event_pids(struct trace_array *tr)
+ {
+       struct trace_pid_list *pid_list;
+       struct trace_event_file *file;
+       int cpu;
+       pid_list = rcu_dereference_protected(tr->filtered_pids,
+                                            lockdep_is_held(&event_mutex));
+       if (!pid_list)
+               return;
+       unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
+       unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
+       unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
+       unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
+       list_for_each_entry(file, &tr->events, list) {
+               clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+       }
+       for_each_possible_cpu(cpu)
+               per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+       rcu_assign_pointer(tr->filtered_pids, NULL);
+       /* Wait till all users are no longer using pid filtering */
+       synchronize_sched();
+       free_pages((unsigned long)pid_list->pids, pid_list->order);
+       kfree(pid_list);
+ }
+ 
+ static void ftrace_clear_event_pids(struct trace_array *tr)
+ {
+       mutex_lock(&event_mutex);
+       __ftrace_clear_event_pids(tr);
+       mutex_unlock(&event_mutex);
+ }
+ 
  static void __put_system(struct event_subsystem *system)
  {
        struct event_filter *filter = system->filter;
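
The new pid filter keeps filtered_pids->pids as a sorted array so that check_ignore_pid() can answer membership queries with bsearch() in O(log n). Below is a minimal userspace sketch of the same lookup; it reuses the ordering rule of cmp_pid() from the hunk above, while filter_has_pid(), the sample pid values and the main() harness are purely illustrative and not part of the kernel change:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

/* Same ordering rule as cmp_pid() in the hunk above. */
static int cmp_pid(const void *key, const void *elt)
{
	const pid_t *a = key;
	const pid_t *b = elt;

	if (*a == *b)
		return 0;
	return *a < *b ? -1 : 1;
}

/* Illustrative stand-in for check_ignore_pid(): true if pid is in the list. */
static int filter_has_pid(const pid_t *pids, size_t nr_pids, pid_t pid)
{
	return bsearch(&pid, pids, nr_pids, sizeof(pid_t), cmp_pid) != NULL;
}

int main(void)
{
	pid_t pids[] = { 4231, 17, 980 };
	size_t nr = sizeof(pids) / sizeof(pids[0]);

	/* bsearch() only works on a sorted array, so sort it first. */
	qsort(pids, nr, sizeof(pid_t), cmp_pid);

	printf("pid 17 listed: %d\n", filter_has_pid(pids, nr, 17));
	printf("pid 42 listed: %d\n", filter_has_pid(pids, nr, 42));
	return 0;
}

In the kernel patch the same decision is inverted into an "ignore" flag: check_ignore_pid() returns true, and the event is dropped, precisely when the task's pid is not found in the sorted list.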
Simple merge
index 8abf1ba18085742af78176dbc514095a47643c9c,0bd212af406c49fc64ad308112e9ad7fab1eb4ff..dda9e6742950305f36fbe920f9fe0c6f68d83fbf
@@@ -85,19 -91,9 +91,19 @@@ check_stack(unsigned long ip, unsigned 
        if (!object_is_on_stack(stack))
                return;
  
 +      /* Can't do this from NMI context (can cause deadlocks) */
 +      if (in_nmi())
 +              return;
 +
        local_irq_save(flags);
-       arch_spin_lock(&max_stack_lock);
+       arch_spin_lock(&stack_trace_max_lock);
  
 +      /*
 +       * RCU may not be watching, make it see us.
 +       * The stack trace code uses rcu_sched.
 +       */
 +      rcu_irq_enter();
 +
        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;
        }
  
   out:
-       arch_spin_unlock(&max_stack_lock);
 +      rcu_irq_exit();
+       arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
  }
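
The trace_stack.c hunk above keeps the max-stack snapshot in check_stack() from running in NMI context and brackets it with rcu_irq_enter()/rcu_irq_exit() so the stack-walk code may rely on RCU even when the CPU is otherwise idle. For context, here is a small sketch of exercising the stack tracer from userspace; it assumes CONFIG_STACK_TRACER=y, root privileges, and the conventional paths /proc/sys/kernel/stack_tracer_enabled and /sys/kernel/debug/tracing/stack_max_size (adjust if tracefs is mounted elsewhere):

#include <stdio.h>
#include <stdlib.h>

/* Write a string to a sysctl/tracefs file; exits on failure. */
static void write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	char buf[64];
	FILE *f;

	/* Same effect as "sysctl kernel.stack_tracer_enabled=1". */
	write_file("/proc/sys/kernel/stack_tracer_enabled", "1");

	/* Read back the deepest kernel stack usage observed so far, in bytes. */
	f = fopen("/sys/kernel/debug/tracing/stack_max_size", "r");
	if (!f) {
		perror("stack_max_size");
		exit(1);
	}
	if (fgets(buf, sizeof(buf), f))
		printf("max stack usage: %s", buf);
	fclose(f);
	return 0;
}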