Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 9 Aug 2009 21:58:09 +0000 (14:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 9 Aug 2009 21:58:09 +0000 (14:58 -0700)
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: silence vblank warnings
  drm: silence pointless vblank warning.
  drm: When adding probed modes, preserve duplicate mode types

19 files changed:
Documentation/lockdep-design.txt
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/apic/x2apic_phys.c
arch/x86/kernel/efi.c
arch/x86/kernel/reboot.c
arch/x86/kernel/vmi_32.c
include/linux/ftrace_event.h
include/linux/perf_counter.h
include/trace/ftrace.h
kernel/lockdep_proc.c
kernel/perf_counter.c
kernel/posix-cpu-timers.c
kernel/rtmutex.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events_filter.c
scripts/recordmcount.pl
tools/perf/builtin-record.c

diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
index e20d913d5914a29261d12f5fa4585164ed170c82..abf768c681e208c3fe5c4d7c5fc27f788c6c29c7 100644
@@ -30,9 +30,9 @@ State
 The validator tracks lock-class usage history into 4n + 1 separate state bits:
 
 - 'ever held in STATE context'
-- 'ever head as readlock in STATE context'
-- 'ever head with STATE enabled'
-- 'ever head as readlock with STATE enabled'
+- 'ever held as readlock in STATE context'
+- 'ever held with STATE enabled'
+- 'ever held as readlock with STATE enabled'
 
 Where STATE can be either one of (kernel/lockdep_states.h)
  - hardirq
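
A concrete way to read the "4n + 1" accounting: each tracked STATE contributes the four bits just listed, plus one shared "ever used" bit. A minimal sketch (the names mirror the kernel's scheme, but this enum is illustrative, not the kernel's actual definition):

    /* Illustrative only: the real definitions live in kernel/lockdep.c
     * and kernel/lockdep_states.h; the names below mirror that scheme. */
    #include <stdio.h>

    enum lock_usage_bit_sketch {
            LOCK_USED_IN_HARDIRQ,           /* ever held in hardirq context */
            LOCK_USED_IN_HARDIRQ_READ,      /* ever held as readlock in hardirq context */
            LOCK_ENABLED_HARDIRQ,           /* ever held with hardirqs enabled */
            LOCK_ENABLED_HARDIRQ_READ,      /* ever held as readlock with hardirqs enabled */
            /* ...the same four bits repeat for softirq and each further STATE... */
            LOCK_USED                       /* the "+ 1" bit: ever used at all */
    };

    int main(void)
    {
            printf("bits for n = 1 state: %d\n", LOCK_USED + 1);    /* 4n + 1 = 5 */
            return 0;
    }
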
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 2ed4e2bb3b3223b687b5d7f8f99bc65076674ba2..a5371ec367769a83b321c23eed6c8d2fde403797 100644
@@ -17,11 +17,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
        return x2apic_enabled();
 }
 
-/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
-
+/*
+ * We need to use more than cpu 0 because more vectors are
+ * needed when MSI-X is used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-       return cpumask_of(0);
+       return cpu_online_mask;
 }
 
 /*
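
The same change is applied to the physical-mode driver just below. To make the comment's rationale concrete: each x86 CPU has only one IDT's worth of device vectors, so pointing every IRQ at CPU 0 caps the total number of MSI-X vectors the system can hand out. A back-of-the-envelope sketch (the per-CPU budget of roughly 200 usable vectors is an assumption for illustration):

    #include <stdio.h>

    int main(void)
    {
            /* Rough, assumed numbers for illustration only. */
            int vectors_per_cpu = 200;      /* usable device vectors per IDT */
            int online_cpus     = 16;

            printf("cpumask_of(0):   ~%d device vectors total\n",
                   vectors_per_cpu);
            printf("cpu_online_mask: ~%d device vectors total\n",
                   vectors_per_cpu * online_cpus);
            return 0;
    }
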
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 0b631c6a2e00fdae492100a500886c70a34a3492..a8989aadc99a5906fc514bcfa863c728143dd2fb 100644
@@ -27,11 +27,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
                return 0;
 }
 
-/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
-
+/*
+ * We need to use more than cpu 0 because more vectors are
+ * needed when MSI-X is used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-       return cpumask_of(0);
+       return cpu_online_mask;
 }
 
 static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 19ccf6d0dccf51ea5e90ce16f229d530b057a0d5..fe26ba3e3451872c083c10b6ccf62651fcd762a0 100644
@@ -354,7 +354,7 @@ void __init efi_init(void)
         */
        c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
        if (c16) {
-               for (i = 0; i < sizeof(vendor) && *c16; ++i)
+               for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
                        vendor[i] = *c16++;
                vendor[i] = '\0';
        } else
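
The efi.c change is a classic off-by-one: the loop bound must leave one slot for the terminating NUL, otherwise a maximal-length firmware vendor string makes vendor[i] = '\0' write one element past the end. A user-space sketch of the corrected pattern (buffer sizes are hypothetical):

    #include <stdio.h>

    int main(void)
    {
            unsigned short c16_buf[] = { 'A', 'C', 'M', 'E', ' ', 'C', 'o', 0 };
            unsigned short *c16 = c16_buf;
            char vendor[4];         /* deliberately tiny to show truncation */
            unsigned int i;

            /* The "- 1" leaves room for the terminator even on truncation. */
            for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
                    vendor[i] = *c16++;
            vendor[i] = '\0';

            printf("vendor = \"%s\"\n", vendor);    /* prints "ACM" */
            return 0;
    }
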
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 834c9da8bf9dae8a0c57eda519d03c3e29307794..9eb897603705724ce9daaedaf62ba778aff193dc 100644
@@ -405,7 +405,7 @@ EXPORT_SYMBOL(machine_real_restart);
 #endif /* CONFIG_X86_32 */
 
 /*
- * Apple MacBook5,2 (2009 MacBook) needs reboot=p
+ * Some Apple MacBooks and MacBookPros need reboot=p to be able to reboot
  */
 static int __init set_pci_reboot(const struct dmi_system_id *d)
 {
@@ -426,6 +426,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
                },
        },
+       {       /* Handle problems with rebooting on Apple MacBookPro5,1 */
+               .callback = set_pci_reboot,
+               .ident = "Apple MacBookPro5,1",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
+               },
+       },
        { }
 };
 
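The new entry follows the standard DMI-quirk shape: a NULL-terminated table of vendor/product matches whose callback (set_pci_reboot here) fires when every field matches, typically via dmi_check_system(). A self-contained sketch of that matching pattern (struct and function names with a _sketch suffix are hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct dmi_match_sketch {
            const char *vendor;     /* expected DMI_SYS_VENDOR substring */
            const char *product;    /* expected DMI_PRODUCT_NAME substring */
            const char *ident;
    };

    static int dmi_matches_sketch(const struct dmi_match_sketch *m,
                                  const char *vendor, const char *product)
    {
            return strstr(vendor, m->vendor) && strstr(product, m->product);
    }

    int main(void)
    {
            static const struct dmi_match_sketch table[] = {
                    { "Apple Inc.", "MacBook5,2",    "Apple MacBook5,2" },
                    { "Apple Inc.", "MacBookPro5,1", "Apple MacBookPro5,1" },
                    { NULL, NULL, NULL }    /* terminator, like the "{ }" above */
            };
            const struct dmi_match_sketch *m;

            for (m = table; m->vendor; m++)
                    if (dmi_matches_sketch(m, "Apple Inc.", "MacBookPro5,1"))
                            printf("%s: would set reboot=p\n", m->ident);
            return 0;
    }
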
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index b263423fbe2ae971424c5bd99c112ac984413383..95a7289e4b0cdd8cc7b88f0504a93b205f2a6934 100644
@@ -441,7 +441,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
        ap.ds = __USER_DS;
        ap.es = __USER_DS;
        ap.fs = __KERNEL_PERCPU;
-       ap.gs = 0;
+       ap.gs = __KERNEL_STACK_CANARY;
 
        ap.eflags = 0;
 
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index d7cd193c2277a1191e16f5d5fcd93d5ee9a2b6d5..a81170de7f6bf4907faf644495831aad0c8fc30a 100644
@@ -89,7 +89,9 @@ enum print_line_t {
        TRACE_TYPE_NO_CONSUME   = 3     /* Handled but ask to not consume */
 };
 
-
+void tracing_generic_entry_update(struct trace_entry *entry,
+                                 unsigned long flags,
+                                 int pc);
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(int type, unsigned long len,
                                  unsigned long flags, int pc);
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index e604e6ef72dd5af13b6265d688451cef47fd56fa..a67dd5c5b6d3b72db134d4d81ed9a0c0aa6d9be9 100644
@@ -121,8 +121,9 @@ enum perf_counter_sample_format {
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
+       PERF_SAMPLE_TP_RECORD                   = 1U << 10,
 
-       PERF_SAMPLE_MAX = 1U << 10,             /* non-ABI */
+       PERF_SAMPLE_MAX = 1U << 11,             /* non-ABI */
 };
 
 /*
@@ -413,6 +414,11 @@ struct perf_callchain_entry {
        __u64                           ip[PERF_MAX_STACK_DEPTH];
 };
 
+struct perf_tracepoint_record {
+       int                             size;
+       char                            *record;
+};
+
 struct task_struct;
 
 /**
@@ -681,6 +687,7 @@ struct perf_sample_data {
        struct pt_regs                  *regs;
        u64                             addr;
        u64                             period;
+       void                            *private;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
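
PERF_SAMPLE_TP_RECORD slots into perf's variable-length sample format: every bit set in sample_type appends a field to the sample, and the new bit appends the raw tracepoint record (tp->size bytes) at the end, exactly as perf_counter_output does below. A sketch of the size accounting (the u64 field sizes match the real format; the tracepoint record size is a hypothetical value):

    #include <stdio.h>

    /* Bit values copied from the enum above. */
    #define PERF_SAMPLE_IP          (1U << 0)
    #define PERF_SAMPLE_TID         (1U << 1)
    #define PERF_SAMPLE_TIME        (1U << 2)
    #define PERF_SAMPLE_TP_RECORD   (1U << 10)

    int main(void)
    {
            unsigned int sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                                       PERF_SAMPLE_TP_RECORD;
            unsigned int tp_size = 48;      /* hypothetical tracepoint entry size */
            unsigned int size = 0;

            if (sample_type & PERF_SAMPLE_IP)        size += 8;
            if (sample_type & PERF_SAMPLE_TID)       size += 8;
            if (sample_type & PERF_SAMPLE_TIME)      size += 8;
            if (sample_type & PERF_SAMPLE_TP_RECORD) size += tp_size;

            printf("sample payload: %u bytes\n", size);     /* 8 + 8 + 48 = 64 */
            return 0;
    }
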
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e595839428e2c394787e205228297d..7fb16d90e7b1334ba431e0f7952c78931a58f567 100644
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)           \
 static int                                                             \
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call(                               \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *     int ret = 0;
+ *
+ *     if (!atomic_inc_return(&event_call->profile_count))
+ *             ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *     return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *     if (atomic_add_negative(-1, &event_call->profile_count))
+ *             unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)         \
+                                                                       \
+static void ftrace_profile_##call(proto);                              \
+                                                                       \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{                                                                      \
+       int ret = 0;                                                    \
+                                                                       \
+       if (!atomic_inc_return(&event_call->profile_count))             \
+               ret = register_trace_##call(ftrace_profile_##call);     \
+                                                                       \
+       return ret;                                                     \
+}                                                                      \
+                                                                       \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{                                                                      \
+       if (atomic_add_negative(-1, &event_call->profile_count))        \
+               unregister_trace_##call(ftrace_profile_##call);         \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
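
The generated enable/disable pair depends on profile_count starting at -1 (see the ATOMIC_INIT(-1) in _TRACE_PROFILE_INIT further down): the first enabler sees atomic_inc_return() land on 0 and registers the probe, and the last disabler sees atomic_add_negative() drop the count back to -1 and unregisters it. A user-space sketch of the same idiom with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int profile_count = -1;   /* mirrors ATOMIC_INIT(-1) */

    static void enable(void)
    {
            /* fetch_add returns the old value; old == -1 means we are first. */
            if (atomic_fetch_add(&profile_count, 1) + 1 == 0)
                    printf("register probe\n");
    }

    static void disable(void)
    {
            /* result < 0 (back to -1) means we were the last user. */
            if (atomic_fetch_sub(&profile_count, 1) - 1 < 0)
                    printf("unregister probe\n");
    }

    int main(void)
    {
            enable();       /* register probe   */
            enable();       /* no output        */
            disable();      /* no output        */
            disable();      /* unregister probe */
            return 0;
    }
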
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call(                               \
 #define TP_FMT(fmt, args...)   fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)                              \
-static void ftrace_profile_##call(proto)                               \
-{                                                                      \
-       extern void perf_tpcounter_event(int);                          \
-       perf_tpcounter_event(event_##call.id);                          \
-}                                                                      \
-                                                                       \
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{                                                                      \
-       int ret = 0;                                                    \
-                                                                       \
-       if (!atomic_inc_return(&event_call->profile_count))             \
-               ret = register_trace_##call(ftrace_profile_##call);     \
-                                                                       \
-       return ret;                                                     \
-}                                                                      \
-                                                                       \
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{                                                                      \
-       if (atomic_add_negative(-1, &event_call->profile_count))        \
-               unregister_trace_##call(ftrace_profile_##call);         \
-}
 
 #define _TRACE_PROFILE_INIT(call)                                      \
        .profile_count = ATOMIC_INIT(-1),                               \
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
        .profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)         \
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))                      \
                                                                        \
 static struct ftrace_event_call event_##call;                          \
                                                                        \
@@ -586,6 +615,99 @@ __attribute__((section("_ftrace_events"))) event_##call = {                \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *     struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *     struct ftrace_event_call *event_call = &event_<call>;
+ *     extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *     struct ftrace_raw_##call *entry;
+ *     u64 __addr = 0, __count = 1;
+ *     unsigned long irq_flags;
+ *     int __entry_size;
+ *     int __data_size;
+ *     int pc;
+ *
+ *     local_save_flags(irq_flags);
+ *     pc = preempt_count();
+ *
+ *     __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *     __entry_size = __data_size + sizeof(*entry);
+ *
+ *     do {
+ *             char raw_data[__entry_size]; <- allocate our sample on the stack
+ *             struct trace_entry *ent;
+ *
+ *             entry = (struct ftrace_raw_<call> *)raw_data;
+ *             ent = &entry->ent;
+ *             tracing_generic_entry_update(ent, irq_flags, pc);
+ *             ent->type = event_call->id;
+ *
+ *             <tstruct> <- set up any dynamic arrays
+ *
+ *             <assign>  <- assign our values
+ *
+ *             perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *                          __entry_size);  <- submit them to perf counter
+ *     } while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)         \
+static void ftrace_profile_##call(proto)                               \
+{                                                                      \
+       struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+       struct ftrace_event_call *event_call = &event_##call;           \
+       extern void perf_tpcounter_event(int, u64, u64, void *, int);   \
+       struct ftrace_raw_##call *entry;                                \
+       u64 __addr = 0, __count = 1;                                    \
+       unsigned long irq_flags;                                        \
+       int __entry_size;                                               \
+       int __data_size;                                                \
+       int pc;                                                         \
+                                                                       \
+       local_save_flags(irq_flags);                                    \
+       pc = preempt_count();                                           \
+                                                                       \
+       __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+       __entry_size = ALIGN(__data_size + sizeof(*entry), sizeof(u64));\
+                                                                       \
+       do {                                                            \
+               char raw_data[__entry_size];                            \
+               struct trace_entry *ent;                                \
+                                                                       \
+               entry = (struct ftrace_raw_##call *)raw_data;           \
+               ent = &entry->ent;                                      \
+               tracing_generic_entry_update(ent, irq_flags, pc);       \
+               ent->type = event_call->id;                             \
+                                                                       \
+               tstruct                                                 \
+                                                                       \
+               { assign; }                                             \
+                                                                       \
+               perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+                            __entry_size);                             \
+       } while (0);                                                    \
+                                                                       \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
 
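One difference between the explanatory comment block and the real macro is worth flagging: the macro rounds __entry_size up with ALIGN(..., sizeof(u64)) so the on-stack sample keeps 8-byte alignment for the perf output path. A sketch of that rounding, assuming the usual power-of-two kernel ALIGN:

    #include <stdio.h>

    /* The conventional kernel ALIGN for a power-of-two 'a'. */
    #define ALIGN_SKETCH(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int data_size  = 13;   /* hypothetical __data_size      */
            unsigned int entry_hdr  = 24;   /* hypothetical sizeof(*entry)   */
            unsigned int entry_size = ALIGN_SKETCH(data_size + entry_hdr, 8);

            printf("%u -> %u\n", data_size + entry_hdr, entry_size);  /* 37 -> 40 */
            return 0;
    }
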
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d7135aa2d2c4ccc0f3638cfa6fb3b2742e33c43c..e94caa666dba0876c6128801e3915285a797e78e 100644
@@ -758,7 +758,8 @@ static int __init lockdep_proc_init(void)
                    &proc_lockdep_stats_operations);
 
 #ifdef CONFIG_LOCK_STAT
-       proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
+       proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
+                   &proc_lock_stat_operations);
 #endif
 
        return 0;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 673c1aaf7332dc50809f558af52780e73bec5807..868102172aa4da45b29d543c833ab5b9ab2475a3 100644
@@ -2646,6 +2646,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                u64 counter;
        } group_entry;
        struct perf_callchain_entry *callchain = NULL;
+       struct perf_tracepoint_record *tp;
        int callchain_size = 0;
        u64 time;
        struct {
@@ -2714,6 +2715,11 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                        header.size += sizeof(u64);
        }
 
+       if (sample_type & PERF_SAMPLE_TP_RECORD) {
+               tp = data->private;
+               header.size += tp->size;
+       }
+
        ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
        if (ret)
                return;
@@ -2777,6 +2783,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                }
        }
 
+       if (sample_type & PERF_SAMPLE_TP_RECORD)
+               perf_output_copy(&handle, tp->record, tp->size);
+
        perf_output_end(&handle);
 }
 
@@ -3703,17 +3712,24 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
+                         int entry_size)
 {
+       struct perf_tracepoint_record tp = {
+               .size = entry_size,
+               .record = record,
+       };
+
        struct perf_sample_data data = {
                .regs = get_irq_regs(),
-               .addr = 0,
+               .addr = addr,
+               .private = &tp,
        };
 
        if (!data.regs)
                data.regs = task_pt_regs(current);
 
-       do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+       do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
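Note the lifetime contract here: the perf_tracepoint_record lives on perf_tpcounter_event's stack and reaches perf_counter_output only through data.private, so the pointer is valid strictly for the duration of the synchronous call chain. A minimal sketch of that pattern (names with a _sketch suffix are hypothetical):

    #include <stdio.h>

    struct tp_record_sketch {
            int   size;
            char *record;
    };

    struct sample_data_sketch {
            void *private;          /* opaque context, like perf_sample_data */
    };

    static void consume(struct sample_data_sketch *data)
    {
            /* Valid only while the caller's frame is live. */
            struct tp_record_sketch *tp = data->private;

            printf("copying %d bytes of raw record\n", tp->size);
    }

    static void produce(char *record, int size)
    {
            struct tp_record_sketch tp = { .size = size, .record = record };
            struct sample_data_sketch data = { .private = &tp };

            consume(&data);         /* synchronous: &tp must not be stored */
    }

    int main(void)
    {
            char raw[32] = "raw tracepoint entry";

            produce(raw, sizeof(raw));
            return 0;
    }
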
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bece7c0b67b29d42c3c425d053bd76087b7684f3..e33a21cb9407987a0aa9b46c32baba590302862b 100644
@@ -521,11 +521,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-       struct task_cputime cputime;
+       struct signal_struct *const sig = tsk->signal;
 
-       thread_group_cputimer(tsk, &cputime);
        cleanup_timers(tsk->signal->cpu_timers,
-                      cputime.utime, cputime.stime, cputime.sum_exec_runtime);
+                      cputime_add(tsk->utime, sig->utime),
+                      cputime_add(tsk->stime, sig->stime),
+                      tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
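
The rewritten posix_cpu_timers_exit_group() leans on the usual exit-time bookkeeping: threads that already died folded their CPU time into the shared signal_struct totals, so the group total is sig->{utime,stime,sum_sched_runtime} plus the last task's own counters, with no need to walk live threads. A toy model of that accumulation (assuming that convention; all names are illustrative):

    #include <stdio.h>

    struct sig_sketch  { unsigned long utime, stime; };
    struct task_sketch { unsigned long utime, stime; struct sig_sketch *sig; };

    static void thread_exit(struct task_sketch *t)
    {
            /* dead threads fold their times into the shared totals */
            t->sig->utime += t->utime;
            t->sig->stime += t->stime;
    }

    int main(void)
    {
            struct sig_sketch sig = { 0, 0 };
            struct task_sketch t1 = { 10, 2, &sig }, t2 = { 5, 1, &sig };

            thread_exit(&t1);       /* t1 exits first */

            /* group exit on t2: total = sig totals + t2's own counters */
            printf("group utime=%lu stime=%lu\n",
                   sig.utime + t2.utime, sig.stime + t2.stime);
            return 0;
    }
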
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index fcd107a78c5a1070c96ace2719d12bfe59a825f0..29bd4baf9e756d848895a868bd25c103f6ee1f85 100644
@@ -1039,16 +1039,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
        if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
                /* We got the lock for task. */
                debug_rt_mutex_lock(lock);
-
                rt_mutex_set_owner(lock, task, 0);
-
+               spin_unlock(&lock->wait_lock);
                rt_mutex_deadlock_account_lock(lock, task);
                return 1;
        }
 
        ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
 
-
        if (ret && !waiter->task) {
                /*
                 * Reset the return value. We might have
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bf27bb7a63e2d94c7c7537f5e847223d2a49f17d..a330513d96ce321ae0ea50e9fa648d6e9cbfc7e5 100644
@@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
        put_online_cpus();
 
+       kfree(buffer->buffers);
        free_cpumask_var(buffer->cpumask);
 
        kfree(buffer);
@@ -1785,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
         */
        RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
-       if (!rb_try_to_discard(cpu_buffer, event))
+       if (rb_try_to_discard(cpu_buffer, event))
                goto out;
 
        /*
@@ -2383,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
                 * the box. Return the padding, and we will release
                 * the current locks, and try again.
                 */
-               rb_advance_reader(cpu_buffer);
                return event;
 
        case RINGBUF_TYPE_TIME_EXTEND:
@@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void)
         * buffer too. A one time deal is all you get from reading
         * the ring buffer from an NMI.
         */
-       if (likely(!in_nmi() && !oops_in_progress))
+       if (likely(!in_nmi()))
                return 1;
 
        tracing_off_permanent();
@@ -2519,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
        if (dolock)
                spin_lock(&cpu_buffer->reader_lock);
        event = rb_buffer_peek(buffer, cpu, ts);
+       if (event && event->type_len == RINGBUF_TYPE_PADDING)
+               rb_advance_reader(cpu_buffer);
        if (dolock)
                spin_unlock(&cpu_buffer->reader_lock);
        local_irq_restore(flags);
@@ -2590,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
                spin_lock(&cpu_buffer->reader_lock);
 
        event = rb_buffer_peek(buffer, cpu, ts);
-       if (!event)
-               goto out_unlock;
-
-       rb_advance_reader(cpu_buffer);
+       if (event)
+               rb_advance_reader(cpu_buffer);
 
- out_unlock:
        if (dolock)
                spin_unlock(&cpu_buffer->reader_lock);
        local_irq_restore(flags);
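
Taken together, the ring-buffer hunks move the advance out of rb_buffer_peek(): ring_buffer_peek() now advances only past padding events (so the caller can retry), while ring_buffer_consume() advances past whatever it returns. A toy array-based model of that split (locking omitted; event types are illustrative):

    #include <stdio.h>

    enum { EV_PADDING, EV_DATA, EV_NONE };

    static int events[] = { EV_DATA, EV_PADDING, EV_DATA };
    static int nr = 3, reader;

    static int peek_raw(void)
    {
            return reader < nr ? events[reader] : EV_NONE;
    }

    /* Like ring_buffer_peek(): advance only past padding, so the
     * caller can drop its locks and retry. */
    static int peek(void)
    {
            int ev = peek_raw();

            if (ev == EV_PADDING)
                    reader++;
            return ev;
    }

    /* Like ring_buffer_consume(): advance past anything returned. */
    static int consume(void)
    {
            int ev = peek_raw();

            if (ev != EV_NONE)
                    reader++;
            return ev;
    }

    int main(void)
    {
            int a = consume();      /* EV_DATA, consumed           */
            int b = peek();         /* EV_PADDING, skipped; retry  */
            int c = peek();         /* EV_DATA, reader not moved   */
            int d = consume();      /* the same EV_DATA, consumed  */

            printf("%d %d %d %d\n", a, b, c, d);    /* 1 0 1 1 */
            return 0;
    }
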
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8930e39b9d8ca4780b57b49638c13d1bc97dcd86..c22b40f8f576c19e7c00dec5e8f1983667e7b084 100644
@@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
                                                    int type,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3548ae5cc7801e131621dff70037f26ba8d7ee01..8b9f4f6e9559a6d152e874f24f75dae235638fc9 100644
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                          int *ent_cpu, u64 *ent_ts);
 
-void tracing_generic_entry_update(struct trace_entry *entry,
-                                 unsigned long flags,
-                                 int pc);
-
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 936c621bbf462395cc7f814cd6bbe01d7c841514..f32dc9d1ea7b51bb5c469b7778fc198079f5c499 100644
@@ -624,9 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
                return -ENOSPC;
        }
 
-       filter->preds[filter->n_preds] = pred;
-       filter->n_preds++;
-
        list_for_each_entry(call, &ftrace_events, list) {
 
                if (!call->define_fields)
@@ -643,6 +640,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
                }
                replace_filter_string(call->filter, filter_string);
        }
+
+       filter->preds[filter->n_preds] = pred;
+       filter->n_preds++;
 out:
        return err;
 }
@@ -1029,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system,
 
                if (elt->op == OP_AND || elt->op == OP_OR) {
                        pred = create_logical_pred(elt->op);
+                       if (!pred)
+                               return -ENOMEM;
                        if (call) {
                                err = filter_add_pred(ps, call, pred);
                                filter_free_pred(pred);
-                       } else
+                       } else {
                                err = filter_add_subsystem_pred(ps, system,
                                                        pred, filter_string);
+                               if (err)
+                                       filter_free_pred(pred);
+                       }
                        if (err)
                                return err;
 
@@ -1048,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system,
                }
 
                pred = create_pred(elt->op, operand1, operand2);
+               if (!pred)
+                       return -ENOMEM;
                if (call) {
                        err = filter_add_pred(ps, call, pred);
                        filter_free_pred(pred);
-               } else
+               } else {
                        err = filter_add_subsystem_pred(ps, system, pred,
                                                        filter_string);
+                       if (err)
+                               filter_free_pred(pred);
+               }
                if (err)
                        return err;
 
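Both filter hunks enforce one ownership rule: create_pred()/create_logical_pred() can return NULL and must be checked, and on the subsystem path the caller now frees the predicate when adding it fails rather than leaking it (on success, ownership transfers). A sketch of that convention (the always-failing add is just to exercise the error path):

    #include <stdio.h>
    #include <stdlib.h>

    struct pred_sketch { int op; };

    static struct pred_sketch *create_pred_sketch(int op)
    {
            struct pred_sketch *p = malloc(sizeof(*p));

            if (p)
                    p->op = op;
            return p;               /* may be NULL: callers must check */
    }

    static int add_subsystem_pred_sketch(struct pred_sketch *p)
    {
            return -1;              /* simulated failure: ownership stays put */
    }

    int main(void)
    {
            struct pred_sketch *pred = create_pred_sketch(1);

            if (!pred)
                    return 1;       /* the kernel returns -ENOMEM here */

            if (add_subsystem_pred_sketch(pred)) {
                    free(pred);     /* the fix: free on failure, no leak */
                    fprintf(stderr, "add failed, pred freed\n");
                    return 1;
            }

            /* on success, ownership transferred; nothing to free here */
            return 0;
    }
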
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index d29baa2e063ac04ca7422f132207cf329b7e47c8..911ba7ffab842dd3017229e269851501f19c145f 100755
@@ -393,7 +393,7 @@ while (<IN>) {
            $read_function = 0;
        }
        # print out any recorded offsets
-       update_funcs() if ($text_found);
+       update_funcs() if (defined($ref_func));
 
        # reset all markers and arrays
        $text_found = 0;
@@ -414,7 +414,10 @@ while (<IN>) {
            $offset = hex $1;
        } else {
            # if we already have a function, and this is weak, skip it
-           if (!defined($ref_func) && !defined($weak{$text})) {
+           if (!defined($ref_func) && !defined($weak{$text}) &&
+                # PPC64 can have symbols that start with .L and
+                # gcc considers these special. Don't use them!
+                $text !~ /^\.L/) {
                $ref_func = $text;
                $offset = hex $1;
            }
@@ -441,7 +444,7 @@ while (<IN>) {
 }
 
 # dump out any more offsets that may have been found
-update_funcs() if ($text_found);
+update_funcs() if (defined($ref_func));
 
 # If we did not find any mcount callers, we are done (do nothing).
 if (!$opened) {
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 6da09928130f8b726446e21e55b4a114a02926dd..90c98082af106da85261a8995224386fcd228c4d 100644
@@ -412,6 +412,7 @@ static void create_counter(int counter, int cpu, pid_t pid)
        if (call_graph)
                attr->sample_type       |= PERF_SAMPLE_CALLCHAIN;
 
+
        attr->mmap              = track;
        attr->comm              = track;
        attr->inherit           = (cpu < 0) && inherit;