#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <limits.h>	/* ULLONG_MAX, used as the initial wait_time_min */

#include <linux/list.h>
#include <linux/hash.h>
static struct perf_session *session;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
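/*
 * Illustrative example: a lock is identified by the address of its
 * lockdep_map, so a lock living at, say, 0xffff880012345678 always
 * hashes to the same bucket and its statistics can be found again with
 *
 *	struct list_head *bucket = lockhashentry((void *)0xffff880012345678UL);
 *
 * (the address is made up; lock_stat_findnew() below does the real lookups)
 */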
struct lock_stat {
	struct list_head	hash_entry;
	struct rb_node		rb;		/* used for sorting */

	/*
	 * FIXME: raw_field_value() returns unsigned long long,
	 * so the address of a lockdep_map has to be handled as 64 bits.
	 * Is there a better solution?
	 */
	void			*addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */

	unsigned int		nr_acquire;
	unsigned int		nr_acquired;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	unsigned int		nr_readlock;
	unsigned int		nr_trylock;
	/* these times are in nanoseconds */
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;

	int			discard;	/* flag of blacklist */
};
/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first acquire event.
 * Due to the nature of lock events, there is no guarantee that the
 * first event seen for a lock is acquire; it can just as well be
 * acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED	0	/* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5
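/*
 * Sketch of the per-thread, per-lock state machine driven by the
 * report_lock_*_event() handlers below; any transition not listed here
 * marks the sequence as broken and gets the lock discarded:
 *
 *	UNINITIALIZED/RELEASED -- acquire           --> ACQUIRING
 *	UNINITIALIZED/RELEASED -- acquire(read/try) --> READ_ACQUIRED
 *	ACQUIRING              -- contended         --> CONTENDED
 *	ACQUIRING/CONTENDED    -- acquired          --> ACQUIRED
 *	ACQUIRED/READ_ACQUIRED -- release           --> (sequence freed)
 */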
/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48
/*
 * struct lock_seq_stat:
 * Place to put the state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head	list;
	int			state;
	u64			prev_event_time;
	void			*addr;

	int			read_count;
};

struct thread_stat {
	struct rb_node		rb;

	u32			tid;
	struct list_head	seq_list;
};
static struct rb_root		thread_stats;

static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node;
	struct thread_stat *st;

	node = thread_stats.rb_node;
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		if (st->tid == tid)
			return st;
		else if (tid < st->tid)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}
static void thread_stat_insert(struct thread_stat *new)
{
	struct rb_node **rb = &thread_stats.rb_node;
	struct rb_node *parent = NULL;
	struct thread_stat *p;

	while (*rb) {
		p = container_of(*rb, struct thread_stat, rb);
		parent = *rb;

		if (new->tid < p->tid)
			rb = &(*rb)->rb_left;
		else if (new->tid > p->tid)
			rb = &(*rb)->rb_right;
		else
			BUG_ON("inserting invalid thread_stat\n");
	}

	rb_link_node(&new->rb, parent, rb);
	rb_insert_color(&new->rb, &thread_stats);
}
static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st;

	st = thread_stat_find(tid);
	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st)
		die("memory allocation failed\n");

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	thread_stat_insert(st);

	return st;
}
static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st)
		die("memory allocation failed\n");

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	thread_stat_findnew = thread_stat_findnew_after_first;
	return st;
}
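/*
 * Note on the function pointer above: the very first lookup can skip
 * the rbtree search entirely because the tree is known to be empty, so
 * thread_stat_findnew starts out pointing at the _first variant and
 * then redirects itself to the general _after_first variant.
 */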
/* build a simple key function that reports whether one is bigger than two */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					    struct lock_stat *two)	\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_min)
SINGLE_KEY(wait_time_max)
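/*
 * For example, SINGLE_KEY(nr_acquired) expands to:
 *
 *	static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *					     struct lock_stat *two)
 *	{
 *		return one->nr_acquired > two->nr_acquired;
 *	}
 */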
struct lock_key {
	/*
	 * name: the key name specified by the user;
	 * it should be simpler than the raw name of the member,
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char		*name;
	int			(*key)(struct lock_stat*, struct lock_stat*);
};
static const char		*sort_key = "acquired";

static int			(*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root		result;	/* place to store sorted data */
#define DEF_KEY_LOCK(name, fn_suffix)	\
	{ #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
	DEF_KEY_LOCK(acquired, nr_acquired),
	DEF_KEY_LOCK(contended, nr_contended),
	DEF_KEY_LOCK(wait_total, wait_time_total),
	DEF_KEY_LOCK(wait_min, wait_time_min),
	DEF_KEY_LOCK(wait_max, wait_time_max),

	/* more complicated comparison keys should go here */
	{ NULL, NULL }	/* sentinel, select_key() stops here */
};
static void select_key(void)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;
			return;
		}
	}

	die("Unknown compare key:%s\n", sort_key);
}
static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *, struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}
/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}
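/*
 * Because insert_to_result() sends "bigger" entries to the left,
 * popping the leftmost node repeatedly yields the statistics in
 * descending order of the selected key; print_result() below relies
 * on exactly that.
 */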
static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
	struct list_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	list_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = zalloc(strlen(name) + 1);
	if (!new->name)
		goto alloc_failed;
	strcpy(new->name, name);

	new->wait_time_min = ULLONG_MAX;

	list_add(&new->hash_entry, entry);
	return new;

alloc_failed:
	die("memory allocation failed\n");
}
static const char		*input_name = "perf.data";

static int			profile_cpu = -1;

struct raw_event_sample {
	u32			size;
	char			data[0];
};

struct trace_acquire_event {
	void			*addr;
	const char		*name;
	int			flag;
};

struct trace_acquired_event {
	void			*addr;
	const char		*name;
};

struct trace_contended_event {
	void			*addr;
	const char		*name;
};

struct trace_release_event {
	void			*addr;
	const char		*name;
};
struct trace_lock_handler {
	void (*acquire_event)(struct trace_acquire_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*acquired_event)(struct trace_acquired_event *,
			       struct event *,
			       int cpu,
			       u64 timestamp,
			       struct thread *thread);

	void (*contended_event)(struct trace_contended_event *,
				struct event *,
				int cpu,
				u64 timestamp,
				struct thread *thread);

	void (*release_event)(struct trace_release_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);
};
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
{
	struct lock_seq_stat *seq;

	list_for_each_entry(seq, &ts->seq_list, list) {
		if (seq->addr == addr)
			return seq;
	}

	seq = zalloc(sizeof(struct lock_seq_stat));
	if (!seq)
		die("Not enough memory\n");
	seq->state = SEQ_STATE_UNINITIALIZED;
	seq->addr = addr;

	list_add(&seq->list, &ts->seq_list);
	return seq;
}
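/*
 * Sequences live on the per-thread seq_list, so the same lock address
 * gets an independent lock_seq_stat in every thread that touches it;
 * that is what lets the state machine tolerate interleaved events from
 * different threads.
 */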
/* broken sequences, per event type: acquire, acquired, contended, release */
static int bad_hist[4];
/* total number of processed lock events, for the debug output in print_result() */
static int total;

static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
			  struct event *__event __used,
			  int cpu __used,
			  u64 timestamp __used,
			  struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
	if (ls->discard)
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, acquire_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!acquire_event->flag) {
			seq->state = SEQ_STATE_ACQUIRING;
		} else {
			if (acquire_event->flag & 1)	/* trylock */
				ls->nr_trylock++;
			if (acquire_event->flag & 2)	/* readlock */
				ls->nr_readlock++;
			seq->state = SEQ_STATE_READ_ACQUIRED;
			seq->read_count = 1;
			ls->nr_acquired++;
		}
		break;
	case SEQ_STATE_READ_ACQUIRED:
		if (acquire_event->flag & 2) {
			/* nested read lock */
			seq->read_count++;
			ls->nr_acquired++;
			goto end;
		} else {
			goto broken;
		}
		break;
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
broken:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[0]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_acquire++;
	seq->prev_event_time = timestamp;
end:
	return;
}
static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
			   struct event *__event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;

	ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
	if (ls->discard)
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, acquired_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_CONTENDED:
		contended_term = timestamp - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[1]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	seq->prev_event_time = timestamp;
end:
	return;
}
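/*
 * The contended term measured above is the CONTENDED -> ACQUIRED
 * interval, i.e. the time a thread spent waiting for a lock that was
 * already held; uncontended acquisitions (ACQUIRING -> ACQUIRED)
 * contribute nothing to wait_time_total.
 */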
static void
report_lock_contended_event(struct trace_contended_event *contended_event,
			    struct event *__event __used,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(contended_event->addr, contended_event->name);
	if (ls->discard)
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, contended_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_CONTENDED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[2]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_CONTENDED;
	ls->nr_contended++;
	seq->prev_event_time = timestamp;
end:
	return;
}
static void
report_lock_release_event(struct trace_release_event *release_event,
			  struct event *__event __used,
			  int cpu __used,
			  u64 timestamp __used,
			  struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(release_event->addr, release_event->name);
	if (ls->discard)
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, release_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
		break;
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_READ_ACQUIRED:
		seq->read_count--;
		BUG_ON(seq->read_count < 0);
		if (!seq->read_count) {
			/* the last reader is gone, free the sequence below */
			break;
		}
		/* other readers still hold the lock, keep the sequence */
		ls->nr_release++;
		goto end;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[3]++;
		goto free_seq;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_release++;
free_seq:
	list_del(&seq->list);
	free(seq);
end:
	return;
}
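/*
 * Once a sequence is freed here, the next event for the same lock in
 * the same thread gets a fresh lock_seq_stat from get_seq() and starts
 * over from SEQ_STATE_UNINITIALIZED.
 */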
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops  = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;
static void
process_lock_acquire_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_acquire_event acquire_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
	acquire_event.name = (char *)raw_field_ptr(event, "name", data);
	acquire_event.flag = (int)raw_field_value(event, "flag", data);

	if (trace_handler->acquire_event)
		trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}
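/*
 * raw_field_value() always returns a u64; copying it into the pointer
 * through 'tmp' with memcpy(..., sizeof(void *)) keeps the conversion
 * well-defined on both 32-bit and 64-bit userland (see the FIXME in
 * struct lock_stat).
 */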
static void
process_lock_acquired_event(void *data,
			    struct event *event __used,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_acquired_event acquired_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
	acquired_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->acquired_event)
		trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}
static void
process_lock_contended_event(void *data,
			     struct event *event __used,
			     int cpu __used,
			     u64 timestamp __used,
			     struct thread *thread __used)
{
	struct trace_contended_event contended_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&contended_event.addr, &tmp, sizeof(void *));
	contended_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->contended_event)
		trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}
static void
process_lock_release_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_release_event release_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&release_event.addr, &tmp, sizeof(void *));
	release_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->release_event)
		trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}
static void
process_raw_event(void *data, int cpu __used,
		  u64 timestamp __used, struct thread *thread __used)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	total++;	/* every lock event counts for the debug statistics */

	if (!strcmp(event->name, "lock_acquire"))
		process_lock_acquire_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_acquired"))
		process_lock_acquired_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_contended"))
		process_lock_contended_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_release"))
		process_lock_release_event(data, event, cpu, timestamp, thread);
}
struct raw_event_queue {
	u64			timestamp;
	int			cpu;
	void			*data;
	struct thread		*thread;
	struct list_head	list;
};

static LIST_HEAD(raw_event_head);

#define FLUSH_PERIOD	(5 * NSEC_PER_SEC)

static u64 flush_limit = ULLONG_MAX;
static u64 last_flush;
static struct raw_event_queue *last_inserted;
static void flush_raw_event_queue(u64 limit)
{
	struct raw_event_queue *tmp, *iter;

	list_for_each_entry_safe(iter, tmp, &raw_event_head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == last_inserted)
			last_inserted = NULL;

		process_raw_event(iter->data, iter->cpu, iter->timestamp,
				  iter->thread);

		last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->data);
		free(iter);
	}
}
static void __queue_raw_event_end(struct raw_event_queue *new)
{
	struct raw_event_queue *iter;

	list_for_each_entry_reverse(iter, &raw_event_head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, &raw_event_head);
}
static void __queue_raw_event_before(struct raw_event_queue *new,
				     struct raw_event_queue *iter)
{
	list_for_each_entry_continue_reverse(iter, &raw_event_head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, &raw_event_head);
}
static void __queue_raw_event_after(struct raw_event_queue *new,
				    struct raw_event_queue *iter)
{
	list_for_each_entry_continue(iter, &raw_event_head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}

	list_add_tail(&new->list, &raw_event_head);
}
/* The queue is ordered by time */
static void __queue_raw_event(struct raw_event_queue *new)
{
	if (!last_inserted) {
		__queue_raw_event_end(new);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. Having a sort based on a list and
	 * on the last inserted event that is close to the current one is
	 * probably more efficient than an rbtree based sort.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_raw_event_before(new, last_inserted);
	else
		__queue_raw_event_after(new, last_inserted);
}
static void queue_raw_event(void *data, int raw_size, int cpu,
			    u64 timestamp, struct thread *thread)
{
	struct raw_event_queue *new;

	if (flush_limit == ULLONG_MAX)
		flush_limit = timestamp + FLUSH_PERIOD;

	if (timestamp < last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return;
	}

	new = malloc(sizeof(*new));
	if (!new)
		die("Not enough memory\n");

	new->timestamp = timestamp;
	new->cpu = cpu;
	new->thread = thread;

	new->data = malloc(raw_size);
	if (!new->data)
		die("Not enough memory\n");

	memcpy(new->data, data, raw_size);

	__queue_raw_event(new);
	last_inserted = new;

	/*
	 * We want to have a slice of events covering 2 * FLUSH_PERIOD.
	 * If FLUSH_PERIOD is big enough, it ensures that every event that
	 * occurred in the first half of the timeslice has been buffered and
	 * that none remain (we need that because of the weakly ordered
	 * event recording we have). Once we reach the 2 * FLUSH_PERIOD
	 * timeslice, we flush the first half to be gentle with the memory
	 * (the second half can still get new events in the middle, so wait
	 * another period to flush it).
	 */
	if (new->timestamp > flush_limit &&
	    new->timestamp - flush_limit > FLUSH_PERIOD) {
		flush_limit += FLUSH_PERIOD;
		flush_raw_event_queue(flush_limit);
	}
}
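/*
 * Illustration of the two-halves buffering scheme described above:
 *
 *	last_flush          flush_limit         flush_limit + FLUSH_PERIOD
 *	    |--------------------|--------------------------|-----> time
 *	     already processed     flushed once an event      still filling,
 *	                           beyond the right edge      late events can
 *	                           arrives                    still be sorted in
 */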
static int process_sample_event(event_t *event, struct perf_session *s)
{
	struct thread *thread;
	struct sample_data data;

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);
	/* CAUTION: using tid as thread.pid */
	thread = perf_session__findnew(s, data.tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
		return 0;

	queue_raw_event(data.raw_data, data.raw_size, data.cpu, data.time, thread);

	return 0;
}
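/*
 * Samples are only queued here; the real processing happens when
 * flush_raw_event_queue() drains them in timestamp order, either
 * periodically from queue_raw_event() or completely via
 * flush_raw_event_queue(ULLONG_MAX) from __cmd_report().
 */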
/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	char cut_name[20];

	printf("%20s ", "Name");
	printf("%10s ", "acquired");
	printf("%10s ", "contended");

	printf("%15s ", "total wait (ns)");
	printf("%15s ", "max wait (ns)");
	printf("%15s ", "min wait (ns)");

	printf("\n\n");

	while ((st = pop_from_result())) {
		bzero(cut_name, 20);

		if (st->discard)
			continue;

		if (strlen(st->name) < 16) {
			/* output raw name */
			printf("%20s ", st->name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* cut off the name to keep the output aligned */
			printf("%20s ", cut_name);
		}

		printf("%10u ", st->nr_acquired);
		printf("%10u ", st->nr_contended);

		printf("%15llu ", st->wait_time_total);
		printf("%15llu ", st->wait_time_max);
		printf("%15llu ", st->wait_time_min == ULLONG_MAX ?
		       0 : st->wait_time_min);

		printf("\n");
	}
	{ /* Output for debug, this has to be removed */
		int i, bad = 0;
		const char *name[4] =
		{ "acquire", "acquired", "contended", "release" };

		for (i = 0; i < 4; i++)
			bad += bad_hist[i];

		printf("\n=== output for debug ===\n\n");
		printf("bad: %d, total: %d\n", bad, total);
		if (total)
			printf("bad rate: %f\n", (double)bad / (double)total);

		printf("histogram of events that caused bad sequences\n");
		for (i = 0; i < 4; i++)
			printf(" %10s: %d\n", name[i], bad_hist[i]);
	}
}
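/*
 * Example of the resulting report layout (values are illustrative):
 *
 *                 Name   acquired  contended total wait (ns)  max wait (ns) ...
 *        rcu_read_lock        251          0               0              0 ...
 *     &mm->mmap_sem#2...       32          3           21340          11023 ...
 */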
static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			printf("%p: %s\n", st->addr, st->name);
		}
	}
}
static struct perf_event_ops eops = {
	.sample			= process_sample_event,
	.comm			= event__process_comm,
};

static int read_events(void)
{
	session = perf_session__new(input_name, O_RDONLY, 0);
	if (!session)
		die("Initializing perf session failed\n");

	return perf_session__process_events(session, &eops);
}
static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}

static void __cmd_report(void)
{
	setup_pager();
	select_key();
	read_events();
	/* flush everything still sitting in the ordering queue */
	flush_raw_event_queue(ULLONG_MAX);
	sort_result();
	print_result();
}
static const char * const report_usage[] = {
	"perf lock report [<options>]",
	NULL
};

static const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		   "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
	/* TODO: type */
	OPT_END()
};

static const char * const lock_usage[] = {
	"perf lock [<options>] {record|trace|report}",
	NULL
};

static const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_END()
};

static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "lock:lock_acquire:r",
	"-e", "lock:lock_acquired:r",
	"-e", "lock:lock_contended:r",
	"-e", "lock:lock_release:r",
};
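/*
 * Notes on the fixed record arguments (a best-effort reading): "-a"
 * records system-wide, "-R" asks for raw sample records (the
 * tracepoint payloads parsed above), "-f" overwrites an existing
 * perf.data, "-m 1024" sizes the mmap buffer and "-c 1" samples every
 * single event; user-supplied arguments are appended after these.
 */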
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		die("memory allocation failed\n");

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
	unsigned int i;

	symbol__init();
	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	argc = parse_options(argc, argv, lock_options, lock_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		__cmd_report();
	} else if (!strcmp(argv[0], "trace")) {
		/* Aliased to 'perf trace' */
		return cmd_trace(argc, argv, prefix);
	} else if (!strcmp(argv[0], "map")) {
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		setup_pager();
		read_events();
		/* drain the queue, otherwise most locks never reach the table */
		flush_raw_event_queue(ULLONG_MAX);
		dump_map();
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return 0;
}
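/*
 * Typical usage (illustrative):
 *
 *	# perf lock record ./my-workload	(writes perf.data)
 *	# perf lock report -k wait_total	(sorted by total wait time)
 *	# perf lock map				(dump lock addresses and names)
 */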