perf session: Use appropriate pointer type instead of silly typecasting
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index fa9d652c2dc3c07182028d4a196293333be75d78..525bcf6adc211000dee3f0205b10dc07c86bf31a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -104,7 +104,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
        self->mmap_window = 32;
        self->machines = RB_ROOT;
        self->repipe = repipe;
-       INIT_LIST_HEAD(&self->ordered_samples.samples_head);
+       INIT_LIST_HEAD(&self->ordered_samples.samples);
        machine__init(&self->host_machine, "", HOST_KERNEL_ID);
 
        if (mode == O_RDONLY) {
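
For orientation: every hunk below manipulates the ordered_samples bookkeeping
embedded in struct perf_session. A minimal sketch of the fields involved,
reconstructed from their uses in this diff (the authoritative definition is in
tools/perf/util/session.h and may differ):

/* Reconstructed sketch, not the verbatim session.h definition. */
struct ordered_samples {
	u64			last_flush;	/* all events up to here were delivered */
	u64			next_flush;	/* flush_sample_queue() stops past this */
	u64			max_timestamp;	/* newest timestamp queued so far */
	struct list_head	samples;	/* the queue, kept sorted by time */
	struct sample_queue	*last_sample;	/* insertion hint: last queued event */
};
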
@@ -262,7 +262,7 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
        if (handler->exit == NULL)
                handler->exit = process_event_stub;
        if (handler->lost == NULL)
-               handler->lost = process_event_stub;
+               handler->lost = event__process_lost;
        if (handler->read == NULL)
                handler->read = process_event_stub;
        if (handler->throttle == NULL)
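
The hunk above makes event__process_lost the default ->lost handler instead of
the do-nothing stub. That function is defined in tools/perf/util/event.c and is
not part of this diff; a hedged sketch of what it is assumed to do, so that the
ui__warning() added in the final hunk has a total_lost count to report:

/* Assumed body of event__process_lost() (event.c, not shown in this diff):
 * accumulate each PERF_RECORD_LOST record's count into the session stats. */
int event__process_lost(event_t *self, struct perf_session *session)
{
	session->hists.stats.total_lost += self->lost.lost;
	return 0;
}
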
@@ -386,34 +386,40 @@ static event__swap_op event__swap_ops[] = {
 
 struct sample_queue {
        u64                     timestamp;
-       struct sample_event     *event;
+       event_t                 *event;
        struct list_head        list;
 };
 
 static void flush_sample_queue(struct perf_session *s,
                               struct perf_event_ops *ops)
 {
-       struct list_head *head = &s->ordered_samples.samples_head;
-       u64 limit = s->ordered_samples.next_flush;
+       struct ordered_samples *os = &s->ordered_samples;
+       struct list_head *head = &os->samples;
        struct sample_queue *tmp, *iter;
+       u64 limit = os->next_flush;
+       u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
 
        if (!ops->ordered_samples || !limit)
                return;
 
        list_for_each_entry_safe(iter, tmp, head, list) {
                if (iter->timestamp > limit)
-                       return;
+                       break;
 
-               if (iter == s->ordered_samples.last_inserted)
-                       s->ordered_samples.last_inserted = NULL;
+               ops->sample(iter->event, s);
 
-               ops->sample((event_t *)iter->event, s);
-
-               s->ordered_samples.last_flush = iter->timestamp;
+               os->last_flush = iter->timestamp;
                list_del(&iter->list);
                free(iter->event);
                free(iter);
        }
+
+       if (list_empty(head)) {
+               os->last_sample = NULL;
+       } else if (last_ts <= limit) {
+               os->last_sample =
+                       list_entry(head->prev, struct sample_queue, list);
+       }
 }
 
 /*
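
The rewritten flush_sample_queue() above delivers queued events oldest-first up
to next_flush and now breaks out of the loop instead of returning when it meets
a newer event: the list is sorted, so every later entry must be newer as well.
The fixup after the loop keeps the insertion hint honest, clearing last_sample
only if the event it pointed at was itself flushed (last_ts <= limit). A
standalone toy of the same flush discipline (hypothetical code, not from perf),
using a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct sample {
	unsigned long long ts;
	struct sample *next;
};

/* Deliver and free every queued sample with ts <= limit; the queue is
 * sorted, so the first newer sample ends the scan. */
static struct sample *flush(struct sample *head, unsigned long long limit)
{
	while (head && head->ts <= limit) {
		struct sample *next = head->next;

		printf("deliver %llu\n", head->ts);
		free(head);
		head = next;
	}
	return head;
}

int main(void)
{
	unsigned long long ts[] = { 1, 3, 5, 8, 13 };
	struct sample *head = NULL, **tail = &head;

	for (size_t i = 0; i < sizeof(ts) / sizeof(ts[0]); i++) {
		struct sample *s = malloc(sizeof(*s));

		s->ts = ts[i];
		s->next = NULL;
		*tail = s;
		tail = &s->next;
	}

	head = flush(head, 5);	/* delivers 1, 3, 5; keeps 8 and 13 queued */

	while (head) {
		struct sample *next = head->next;

		printf("queued %llu\n", head->ts);
		free(head);
		head = next;
	}
	return 0;
}
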
@@ -465,71 +471,50 @@ static int process_finished_round(event_t *event __used,
        return 0;
 }
 
-static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
-{
-       struct sample_queue *iter;
-
-       list_for_each_entry_reverse(iter, head, list) {
-               if (iter->timestamp < new->timestamp) {
-                       list_add(&new->list, &iter->list);
-                       return;
-               }
-       }
-
-       list_add(&new->list, head);
-}
-
-static void __queue_sample_before(struct sample_queue *new,
-                                 struct sample_queue *iter,
-                                 struct list_head *head)
-{
-       list_for_each_entry_continue_reverse(iter, head, list) {
-               if (iter->timestamp < new->timestamp) {
-                       list_add(&new->list, &iter->list);
-                       return;
-               }
-       }
-
-       list_add(&new->list, head);
-}
-
-static void __queue_sample_after(struct sample_queue *new,
-                                struct sample_queue *iter,
-                                struct list_head *head)
-{
-       list_for_each_entry_continue(iter, head, list) {
-               if (iter->timestamp > new->timestamp) {
-                       list_add_tail(&new->list, &iter->list);
-                       return;
-               }
-       }
-       list_add_tail(&new->list, head);
-}
-
 /* The queue is ordered by time */
 static void __queue_sample_event(struct sample_queue *new,
                                 struct perf_session *s)
 {
-       struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
-       struct list_head *head = &s->ordered_samples.samples_head;
+       struct ordered_samples *os = &s->ordered_samples;
+       struct sample_queue *sample = os->last_sample;
+       u64 timestamp = new->timestamp;
+       struct list_head *p;
 
+       os->last_sample = new;
 
-       if (!last_inserted) {
-               __queue_sample_end(new, head);
+       if (!sample) {
+               list_add(&new->list, &os->samples);
+               os->max_timestamp = timestamp;
                return;
        }
 
        /*
-        * Most of the time the current event has a timestamp
-        * very close to the last event inserted, unless we just switched
-        * to another event buffer. Having a sorting based on a list and
-        * on the last inserted event that is close to the current one is
-        * probably more efficient than an rbtree based sorting.
+        * last_sample might point to some random place in the list as it's
+        * the last queued event. We expect that the new event is close to
+        * this.
         */
-       if (last_inserted->timestamp >= new->timestamp)
-               __queue_sample_before(new, last_inserted, head);
-       else
-               __queue_sample_after(new, last_inserted, head);
+       if (sample->timestamp <= timestamp) {
+               while (sample->timestamp <= timestamp) {
+                       p = sample->list.next;
+                       if (p == &os->samples) {
+                               list_add_tail(&new->list, &os->samples);
+                               os->max_timestamp = timestamp;
+                               return;
+                       }
+                       sample = list_entry(p, struct sample_queue, list);
+               }
+               list_add_tail(&new->list, &sample->list);
+       } else {
+               while (sample->timestamp > timestamp) {
+                       p = sample->list.prev;
+                       if (p == &os->samples) {
+                               list_add(&new->list, &os->samples);
+                               return;
+                       }
+                       sample = list_entry(p, struct sample_queue, list);
+               }
+               list_add(&new->list, &sample->list);
+       }
 }
 
 static int queue_sample_event(event_t *event, struct sample_data *data,
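
The rework above drops the three __queue_sample_*() helpers in favor of one
bidirectional walk that starts at last_sample, the most recently queued event.
Because an mmap buffer hands events over in nearly sorted order, the new event
usually belongs right next to that hint, making insertion effectively O(1).
Note that max_timestamp is only updated on the forward/tail path: an event
inserted behind the hint can never be the newest one seen. A standalone toy of
the same scheme (hypothetical code, not from perf), using a circular doubly
linked list in the style of list_head:

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long long ts;
	struct node *prev, *next;
};

int main(void)
{
	struct node head = { 0, &head, &head };	/* sentinel, like list_head */
	struct node *hint = NULL;		/* plays the role of last_sample */
	unsigned long long in[] = { 4, 5, 2, 7, 6 };

	for (size_t i = 0; i < sizeof(in) / sizeof(in[0]); i++) {
		struct node *n = malloc(sizeof(*n));
		struct node *pos = hint ? hint : head.next;

		n->ts = in[i];
		if (pos != &head && pos->ts <= n->ts) {
			/* walk forward to the first newer entry (or the end) */
			while (pos != &head && pos->ts <= n->ts)
				pos = pos->next;
		} else if (pos != &head) {
			/* walk backward past newer entries, then step forward */
			while (pos != &head && pos->ts > n->ts)
				pos = pos->prev;
			pos = pos->next;
		}
		/* link n in before pos; before the sentinel == at the tail */
		n->prev = pos->prev;
		n->next = pos;
		pos->prev->next = n;
		pos->prev = n;
		hint = n;
	}

	for (struct node *p = head.next; p != &head; ) {
		struct node *next = p->next;

		printf("%llu ", p->ts);		/* prints: 2 4 5 6 7 */
		free(p);
		p = next;
	}
	printf("\n");
	return 0;
}
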
@@ -559,10 +544,6 @@ static int queue_sample_event(event_t *event, struct sample_data *data,
        memcpy(new->event, event, event->header.size);
 
        __queue_sample_event(new, s);
-       s->ordered_samples.last_inserted = new;
-
-       if (new->timestamp > s->ordered_samples.max_timestamp)
-               s->ordered_samples.max_timestamp = new->timestamp;
 
        return 0;
 }
@@ -852,6 +833,24 @@ done:
        flush_sample_queue(self, ops);
 out_err:
        ui_progress__delete(progress);
+
+       if (ops->lost == event__process_lost &&
+           self->hists.stats.total_lost != 0) {
+               ui__warning("Processed %Lu events and LOST %Lu!\n\n"
+                           "Check IO/CPU overload!\n\n",
+                           self->hists.stats.total_period,
+                           self->hists.stats.total_lost);
+       }
+
+       if (self->hists.stats.nr_unknown_events != 0) {
+               ui__warning("Found %u unknown events!\n\n"
+                           "Is this an older tool processing a perf.data "
+                           "file generated by a more recent tool?\n\n"
+                           "If that is not the case, consider "
+                           "reporting to linux-kernel@vger.kernel.org.\n\n",
+                           self->hists.stats.nr_unknown_events);
+       }
+

        return err;
 }