From: Stephen Rothwell
Date: Thu, 5 Nov 2015 02:50:20 +0000 (+1100)
Subject: Merge remote-tracking branch 'ftrace/for-next'
X-Git-Tag: KARO-TXUL-2015-12-04~64
X-Git-Url: https://git.karo-electronics.de/?p=karo-tx-linux.git;a=commitdiff_plain;h=e14477cb27beecfe79382961ab84327e1c2bc760

Merge remote-tracking branch 'ftrace/for-next'
---

e14477cb27beecfe79382961ab84327e1c2bc760
diff --cc kernel/trace/trace_events.c
index 7ca09cdc20c2,bee1e1530052..6bbc5f652355
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@@ -446,6 -469,142 +469,142 @@@ static void ftrace_clear_events(struct
  	mutex_unlock(&event_mutex);
  }
  
+ static int cmp_pid(const void *key, const void *elt)
+ {
+ 	const pid_t *search_pid = key;
+ 	const pid_t *pid = elt;
+
+ 	if (*search_pid == *pid)
+ 		return 0;
+ 	if (*search_pid < *pid)
+ 		return -1;
+ 	return 1;
+ }
+
+ static bool
+ check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
+ {
+ 	pid_t search_pid;
+ 	pid_t *pid;
+
+ 	/*
+ 	 * Return false, because if filtered_pids does not exist,
+ 	 * all pids are good to trace.
+ 	 */
+ 	if (!filtered_pids)
+ 		return false;
+
+ 	search_pid = task->pid;
+
+ 	pid = bsearch(&search_pid, filtered_pids->pids,
+ 		      filtered_pids->nr_pids, sizeof(pid_t),
+ 		      cmp_pid);
+ 	if (!pid)
+ 		return true;
+
+ 	return false;
+ }
+
+ static void
 -event_filter_pid_sched_switch_probe_pre(void *data,
++event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
+ 		    struct task_struct *prev, struct task_struct *next)
+ {
+ 	struct trace_array *tr = data;
+ 	struct trace_pid_list *pid_list;
+
+ 	pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ 		       check_ignore_pid(pid_list, prev) &&
+ 		       check_ignore_pid(pid_list, next));
+ }
+
+ static void
 -event_filter_pid_sched_switch_probe_post(void *data,
++event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
+ 		    struct task_struct *prev, struct task_struct *next)
+ {
+ 	struct trace_array *tr = data;
+ 	struct trace_pid_list *pid_list;
+
+ 	pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ 		       check_ignore_pid(pid_list, next));
+ }
+
+ static void
+ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
+ {
+ 	struct trace_array *tr = data;
+ 	struct trace_pid_list *pid_list;
+
+ 	/* Nothing to do if we are already tracing */
+ 	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+ 		return;
+
+ 	pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ 		       check_ignore_pid(pid_list, task));
+ }
+
+ static void
+ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
+ {
+ 	struct trace_array *tr = data;
+ 	struct trace_pid_list *pid_list;
+
+ 	/* Nothing to do if we are not tracing */
+ 	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+ 		return;
+
+ 	pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ 	/* Set tracing if current is enabled */
+ 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ 		       check_ignore_pid(pid_list, current));
+ }
+
+ static void __ftrace_clear_event_pids(struct trace_array *tr)
+ {
+ 	struct trace_pid_list *pid_list;
+ 	struct trace_event_file *file;
+ 	int cpu;
+
+ 	pid_list = rcu_dereference_protected(tr->filtered_pids,
+ 					     lockdep_is_held(&event_mutex));
+ 	if (!pid_list)
+ 		return;
+
+ 	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
+ 	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
+
+ 	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
+ 	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
+
+ 	list_for_each_entry(file, &tr->events, list) {
+ 		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+ 	}
+
+ 	for_each_possible_cpu(cpu)
+ 		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+
+ 	rcu_assign_pointer(tr->filtered_pids, NULL);
+
+ 	/* Wait till all users are no longer using pid filtering */
+ 	synchronize_sched();
+
+ 	free_pages((unsigned long)pid_list->pids, pid_list->order);
+ 	kfree(pid_list);
+ }
+
+ static void ftrace_clear_event_pids(struct trace_array *tr)
+ {
+ 	mutex_lock(&event_mutex);
+ 	__ftrace_clear_event_pids(tr);
+ 	mutex_unlock(&event_mutex);
+ }
+
  static void __put_system(struct event_subsystem *system)
  {
  	struct event_filter *filter = system->filter;
diff --cc kernel/trace/trace_stack.c
index 8abf1ba18085,0bd212af406c..dda9e6742950
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@@ -85,19 -91,9 +91,19 @@@ check_stack(unsigned long ip, unsigned
  	if (!object_is_on_stack(stack))
  		return;
  
 +	/* Can't do this from NMI context (can cause deadlocks) */
 +	if (in_nmi())
 +		return;
 +
  	local_irq_save(flags);
- 	arch_spin_lock(&max_stack_lock);
+ 	arch_spin_lock(&stack_trace_max_lock);
  
 +	/*
 +	 * RCU may not be watching, make it see us.
 +	 * The stack trace code uses rcu_sched.
 +	 */
 +	rcu_irq_enter();
 +
  	/* In case another CPU set the tracer_frame on us */
  	if (unlikely(!frame_size))
  		this_size -= tracer_frame;
@@@ -179,8 -175,7 +185,8 @@@
  	}
  
   out:
 +	rcu_irq_exit();
- 	arch_spin_unlock(&max_stack_lock);
+ 	arch_spin_unlock(&stack_trace_max_lock);
  	local_irq_restore(flags);
  }
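
A note for readers skimming the merge: the new code in the trace_events.c hunk decides whether to ignore an event by binary-searching the task's pid in a sorted pid array (cmp_pid() plus bsearch() in check_ignore_pid()), and the sched_switch/sched_wakeup probes cache that answer per CPU in ignore_pid. The standalone sketch below mirrors only the lookup step in plain userspace C; the name should_ignore_pid and the sample pid values are illustrative assumptions, not part of the patch.

/*
 * Illustrative userspace sketch of the pid-list lookup used above:
 * a task is "ignored" when its pid is NOT found in the sorted filter
 * array.  Hypothetical names; plain libc bsearch(3).
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static int cmp_pid(const void *key, const void *elt)
{
	const pid_t *search_pid = key;
	const pid_t *pid = elt;

	if (*search_pid == *pid)
		return 0;
	return *search_pid < *pid ? -1 : 1;
}

/* Return 1 ("ignore") when pid is not in the sorted filter list. */
static int should_ignore_pid(const pid_t *filtered, size_t nr, pid_t pid)
{
	if (!filtered)		/* no filter installed: trace everyone */
		return 0;
	return bsearch(&pid, filtered, nr, sizeof(pid_t), cmp_pid) == NULL;
}

int main(void)
{
	pid_t filtered[] = { 100, 250, 4096 };	/* must stay sorted */

	printf("pid 250:  ignore=%d\n", should_ignore_pid(filtered, 3, 250));
	printf("pid 1234: ignore=%d\n", should_ignore_pid(filtered, 3, 1234));
	return 0;
}

Keeping the filter array sorted is what allows a bsearch() instead of a linear scan on every sched_switch/sched_wakeup event, which matters because these probes fire on every context switch.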