2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 * Parts came from builtin-annotate.c, see those files for further
7 * Released under the GPL v2. (and only v2, not any later version)
20 #include "block-range.h"
21 #include "arch/common.h"
24 #include <linux/bitops.h>
25 #include <sys/utsname.h>
27 const char *disassembler_style;
28 const char *objdump_path;
29 static regex_t file_lineno;
31 static struct ins_ops *ins__find(struct arch *arch, const char *name);
32 static void ins__sort(struct arch *arch);
33 static int disasm_line__parse(char *line, const char **namep, char **rawp);
37 struct ins *instructions;
38 size_t nr_instructions;
39 size_t nr_instructions_allocated;
40 struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name);
41 bool sorted_instructions;
44 int (*init)(struct arch *arch);
47 char skip_functions_char;
51 static struct ins_ops call_ops;
52 static struct ins_ops dec_ops;
53 static struct ins_ops jump_ops;
54 static struct ins_ops mov_ops;
55 static struct ins_ops nop_ops;
56 static struct ins_ops lock_ops;
57 static struct ins_ops ret_ops;
59 static int arch__grow_instructions(struct arch *arch)
61 struct ins *new_instructions;
62 size_t new_nr_allocated;
64 if (arch->nr_instructions_allocated == 0 && arch->instructions)
65 goto grow_from_non_allocated_table;
67 new_nr_allocated = arch->nr_instructions_allocated + 128;
68 new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
69 if (new_instructions == NULL)
72 out_update_instructions:
73 arch->instructions = new_instructions;
74 arch->nr_instructions_allocated = new_nr_allocated;
77 grow_from_non_allocated_table:
78 new_nr_allocated = arch->nr_instructions + 128;
79 new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
80 if (new_instructions == NULL)
83 memcpy(new_instructions, arch->instructions, arch->nr_instructions);
84 goto out_update_instructions;
87 static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
91 if (arch->nr_instructions == arch->nr_instructions_allocated &&
92 arch__grow_instructions(arch))
95 ins = &arch->instructions[arch->nr_instructions];
96 ins->name = strdup(name);
101 arch->nr_instructions++;
107 #include "arch/arm/annotate/instructions.c"
108 #include "arch/arm64/annotate/instructions.c"
109 #include "arch/x86/annotate/instructions.c"
110 #include "arch/powerpc/annotate/instructions.c"
/*
 * Per-arch annotation descriptors, looked up by (normalized) name via
 * arch__find(). x86 ships a static instruction table; the other arches
 * register instructions from their init hooks.
 * NOTE(review): extraction dropped the .name fields and the x86 objdump
 * settings — confirm the full table against upstream before editing.
 */
112 static struct arch architectures[] = {
115 .init = arm__annotate_init,
119 .init = arm64__annotate_init,
123 .instructions = x86__instructions,
124 .nr_instructions = ARRAY_SIZE(x86__instructions),
131 .init = powerpc__annotate_init,
135 static void ins__delete(struct ins_operands *ops)
139 zfree(&ops->source.raw);
140 zfree(&ops->source.name);
141 zfree(&ops->target.raw);
142 zfree(&ops->target.name);
145 static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
146 struct ins_operands *ops)
148 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
151 int ins__scnprintf(struct ins *ins, char *bf, size_t size,
152 struct ins_operands *ops)
154 if (ins->ops->scnprintf)
155 return ins->ops->scnprintf(ins, bf, size, ops);
157 return ins__raw_scnprintf(ins, bf, size, ops);
/*
 * Parse a call's target out of objdump's raw operand text: the leading
 * hex address, then the "<name>" symbol if present, else the "*addr"
 * indirect form resolved through the map's symbol table.
 * NOTE(review): several NULL checks / returns were lost in extraction;
 * compare against upstream before editing.
 */
160 static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
162 char *endptr, *tok, *name;
164 ops->target.addr = strtoull(ops->raw, &endptr, 16);
166 name = strchr(endptr, '<');
/* some arches prefix local symbols; skip names carrying that char */
172 if (arch->objdump.skip_functions_char &&
173 strchr(name, arch->objdump.skip_functions_char))
176 tok = strchr(name, '>');
181 ops->target.name = strdup(name);
184 return ops->target.name == NULL ? -1 : 0;
/* indirect call: "*addr" — try resolving the address to a symbol */
187 tok = strchr(endptr, '*');
189 struct symbol *sym = map__find_symbol(map, map->map_ip(map, ops->target.addr));
191 ops->target.name = strdup(sym->name);
193 ops->target.addr = 0;
197 ops->target.addr = strtoull(tok + 1, NULL, 16);
201 static int call__scnprintf(struct ins *ins, char *bf, size_t size,
202 struct ins_operands *ops)
204 if (ops->target.name)
205 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
207 if (ops->target.addr == 0)
208 return ins__raw_scnprintf(ins, bf, size, ops);
210 return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
213 static struct ins_ops call_ops = {
214 .parse = call__parse,
215 .scnprintf = call__scnprintf,
218 bool ins__is_call(const struct ins *ins)
220 return ins->ops == &call_ops;
223 static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
225 const char *s = strchr(ops->raw, '+');
227 ops->target.addr = strtoull(ops->raw, NULL, 16);
230 ops->target.offset = strtoull(s, NULL, 16);
232 ops->target.offset = UINT64_MAX;
237 static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
238 struct ins_operands *ops)
240 if (!ops->target.addr)
241 return ins__raw_scnprintf(ins, bf, size, ops);
243 return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
246 static struct ins_ops jump_ops = {
247 .parse = jump__parse,
248 .scnprintf = jump__scnprintf,
251 bool ins__is_jump(const struct ins *ins)
253 return ins->ops == &jump_ops;
256 static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
258 char *endptr, *name, *t;
260 if (strstr(raw, "(%rip)") == NULL)
263 *addrp = strtoull(comment, &endptr, 16);
264 name = strchr(endptr, '<');
270 t = strchr(name, '>');
275 *namep = strdup(name);
/*
 * "lock" prefixes another instruction: parse the rest of the raw line as
 * that inner instruction into ops->locked and run its own parser.
 * NOTE(review): the success/error return lines were lost in extraction;
 * the final zfree() is the failure path's cleanup.
 */
281 static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
283 ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
284 if (ops->locked.ops == NULL)
287 if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
290 ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
292 if (ops->locked.ins.ops == NULL)
295 if (ops->locked.ins.ops->parse &&
296 ops->locked.ins.ops->parse(arch, ops->locked.ops, map) < 0)
/* failure path: release the partially initialized nested ops */
302 zfree(&ops->locked.ops);
306 static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
307 struct ins_operands *ops)
311 if (ops->locked.ins.ops == NULL)
312 return ins__raw_scnprintf(ins, bf, size, ops);
314 printed = scnprintf(bf, size, "%-6.6s ", ins->name);
315 return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
316 size - printed, ops->locked.ops);
319 static void lock__delete(struct ins_operands *ops)
321 struct ins *ins = &ops->locked.ins;
323 if (ins->ops && ins->ops->free)
324 ins->ops->free(ops->locked.ops);
326 ins__delete(ops->locked.ops);
328 zfree(&ops->locked.ops);
329 zfree(&ops->target.raw);
330 zfree(&ops->target.name);
333 static struct ins_ops lock_ops = {
334 .free = lock__delete,
335 .parse = lock__parse,
336 .scnprintf = lock__scnprintf,
/*
 * Split a mov's raw text at the ',' into source and target operands,
 * strip trailing whitespace, then resolve any objdump "# addr <sym>"
 * comment into symbolic names for both operands via comment__symbol().
 * NOTE(review): the ','-restore, target assignment and NULL-check lines
 * were lost in extraction; the final zfree() is the error path cleanup.
 */
339 static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *map __maybe_unused)
341 char *s = strchr(ops->raw, ','), *target, *comment, prev;
347 ops->source.raw = strdup(ops->raw);
350 if (ops->source.raw == NULL)
354 comment = strchr(s, arch->objdump.comment_char);
/* walk back from end-of-string over trailing spaces */
359 s = strchr(s, '\0') - 1;
361 while (s > target && isspace(s[0]))
367 ops->target.raw = strdup(target);
370 if (ops->target.raw == NULL)
371 goto out_free_source;
376 while (comment[0] != '\0' && isspace(comment[0]))
379 comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
380 comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
/* error path: release the already-duplicated source operand */
385 zfree(&ops->source.raw);
389 static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
390 struct ins_operands *ops)
392 return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
393 ops->source.name ?: ops->source.raw,
394 ops->target.name ?: ops->target.raw);
397 static struct ins_ops mov_ops = {
399 .scnprintf = mov__scnprintf,
402 static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
404 char *target, *comment, *s, prev;
406 target = s = ops->raw;
408 while (s[0] != '\0' && !isspace(s[0]))
413 ops->target.raw = strdup(target);
416 if (ops->target.raw == NULL)
419 comment = strchr(s, arch->objdump.comment_char);
423 while (comment[0] != '\0' && isspace(comment[0]))
426 comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
431 static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
432 struct ins_operands *ops)
434 return scnprintf(bf, size, "%-6.6s %s", ins->name,
435 ops->target.name ?: ops->target.raw);
438 static struct ins_ops dec_ops = {
440 .scnprintf = dec__scnprintf,
443 static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
444 struct ins_operands *ops __maybe_unused)
446 return scnprintf(bf, size, "%-6.6s", "nop");
449 static struct ins_ops nop_ops = {
450 .scnprintf = nop__scnprintf,
453 static struct ins_ops ret_ops = {
454 .scnprintf = ins__raw_scnprintf,
457 bool ins__is_ret(const struct ins *ins)
459 return ins->ops == &ret_ops;
462 static int ins__key_cmp(const void *name, const void *insp)
464 const struct ins *ins = insp;
466 return strcmp(name, ins->name);
469 static int ins__cmp(const void *a, const void *b)
471 const struct ins *ia = a;
472 const struct ins *ib = b;
474 return strcmp(ia->name, ib->name);
477 static void ins__sort(struct arch *arch)
479 const int nmemb = arch->nr_instructions;
481 qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
484 static struct ins_ops *__ins__find(struct arch *arch, const char *name)
487 const int nmemb = arch->nr_instructions;
489 if (!arch->sorted_instructions) {
491 arch->sorted_instructions = true;
494 ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
495 return ins ? ins->ops : NULL;
498 static struct ins_ops *ins__find(struct arch *arch, const char *name)
500 struct ins_ops *ops = __ins__find(arch, name);
502 if (!ops && arch->associate_instruction_ops)
503 ops = arch->associate_instruction_ops(arch, name);
508 static int arch__key_cmp(const void *name, const void *archp)
510 const struct arch *arch = archp;
512 return strcmp(name, arch->name);
515 static int arch__cmp(const void *a, const void *b)
517 const struct arch *aa = a;
518 const struct arch *ab = b;
520 return strcmp(aa->name, ab->name);
523 static void arch__sort(void)
525 const int nmemb = ARRAY_SIZE(architectures);
527 qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
530 static struct arch *arch__find(const char *name)
532 const int nmemb = ARRAY_SIZE(architectures);
540 return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
543 int symbol__alloc_hist(struct symbol *sym)
545 struct annotation *notes = symbol__annotation(sym);
546 const size_t size = symbol__size(sym);
547 size_t sizeof_sym_hist;
549 /* Check for overflow when calculating sizeof_sym_hist */
550 if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64))
553 sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
555 /* Check for overflow in zalloc argument */
556 if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
557 / symbol_conf.nr_events)
560 notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
561 if (notes->src == NULL)
563 notes->src->sizeof_sym_hist = sizeof_sym_hist;
564 notes->src->nr_histograms = symbol_conf.nr_events;
565 INIT_LIST_HEAD(¬es->src->source);
569 /* The cycles histogram is lazily allocated. */
570 static int symbol__alloc_hist_cycles(struct symbol *sym)
572 struct annotation *notes = symbol__annotation(sym);
573 const size_t size = symbol__size(sym);
575 notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
576 if (notes->src->cycles_hist == NULL)
581 void symbol__annotate_zero_histograms(struct symbol *sym)
583 struct annotation *notes = symbol__annotation(sym);
585 pthread_mutex_lock(¬es->lock);
586 if (notes->src != NULL) {
587 memset(notes->src->histograms, 0,
588 notes->src->nr_histograms * notes->src->sizeof_sym_hist);
589 if (notes->src->cycles_hist)
590 memset(notes->src->cycles_hist, 0,
591 symbol__size(sym) * sizeof(struct cyc_hist));
593 pthread_mutex_unlock(¬es->lock);
/*
 * Fold one observed basic block (ending at @offset, taking @cycles) into
 * the per-instruction cycles histogram. Aggregate counters are always
 * updated; the start-anchored counters only track the longest block seen
 * ending here, discarding stats of shorter ones.
 * NOTE(review): some braces/returns were lost in extraction.
 */
596 static int __symbol__account_cycles(struct annotation *notes,
598 unsigned offset, unsigned cycles,
603 ch = notes->src->cycles_hist;
605 * For now we can only account one basic block per
606 * final jump. But multiple could be overlapping.
607 * Always account the longest one. So when
608 * a shorter one has been already seen throw it away.
610 * We separately always account the full cycles.
612 ch[offset].num_aggr++;
613 ch[offset].cycles_aggr += cycles;
615 if (!have_start && ch[offset].have_start)
617 if (ch[offset].num) {
618 if (have_start && (!ch[offset].have_start ||
619 ch[offset].start > start)) {
/* a longer block ends here: drop the shorter one's stats */
620 ch[offset].have_start = 0;
621 ch[offset].cycles = 0;
/* saturating reset counter, capped at 0xffff */
623 if (ch[offset].reset < 0xffff)
625 } else if (have_start &&
626 ch[offset].start < start)
629 ch[offset].have_start = have_start;
630 ch[offset].start = start;
631 ch[offset].cycles += cycles;
/*
 * Core histogram bump: range-check @addr against the symbol, convert it
 * to an offset and increment the per-event bucket.
 * NOTE(review): the h->addr[offset]++ / h->sum++ increments and return
 * statements were lost in extraction.
 */
636 static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
637 struct annotation *notes, int evidx, u64 addr)
642 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
644 if (addr < sym->start || addr >= sym->end) {
645 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
646 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
650 offset = addr - sym->start;
651 h = annotation__histogram(notes, evidx);
655 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
656 ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
657 addr, addr - sym->start, evidx, h->addr[offset]);
661 static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles)
663 struct annotation *notes = symbol__annotation(sym);
665 if (notes->src == NULL) {
666 if (symbol__alloc_hist(sym) < 0)
669 if (!notes->src->cycles_hist && cycles) {
670 if (symbol__alloc_hist_cycles(sym) < 0)
676 static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
679 struct annotation *notes;
683 notes = symbol__get_annotation(sym, false);
686 return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
689 static int symbol__account_cycles(u64 addr, u64 start,
690 struct symbol *sym, unsigned cycles)
692 struct annotation *notes;
697 notes = symbol__get_annotation(sym, true);
700 if (addr < sym->start || addr >= sym->end)
704 if (start < sym->start || start >= sym->end)
709 offset = addr - sym->start;
710 return __symbol__account_cycles(notes,
711 start ? start - sym->start : 0,
/*
 * Account the cycles of a sampled branch's basic block to its target
 * symbol. The start address is honoured only when the IPC would be
 * meaningful, i.e. the whole block lies within one symbol (or the jump
 * lands on the function's first address).
 * NOTE(review): the saddr declaration and surrounding conditionals were
 * lost in extraction.
 */
716 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
717 struct addr_map_symbol *start,
727 * Only set start when IPC can be computed. We can only
728 * compute it when the basic block is completely in a single
730 * Special case the case when the jump is elsewhere, but
731 * it starts on the function start.
734 (start->sym == ams->sym ||
736 start->addr == ams->sym->start + ams->map->start)))
737 saddr = start->al_addr;
739 pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
741 start ? start->addr : 0,
742 ams->sym ? ams->sym->start + ams->map->start : 0,
744 err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
746 pr_debug2("account_cycles failed %d\n", err);
750 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx)
752 return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr);
755 int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
757 return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
760 static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map *map)
762 dl->ins.ops = ins__find(arch, dl->ins.name);
767 if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, map) < 0)
/*
 * Split an objdump line into mnemonic (*namep, strdup()ed) and raw
 * operands (*rawp, pointing into @line). The name token is temporarily
 * NUL-terminated in place for the strdup().
 * NOTE(review): the terminate/restore and error-return lines were lost
 * in extraction.
 */
771 static int disasm_line__parse(char *line, const char **namep, char **rawp)
773 char *name = line, tmp;
775 while (isspace(name[0]))
/* scan past the mnemonic token to where the operands start */
783 while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
788 *namep = strdup(name);
795 if ((*rawp)[0] != '\0') {
797 while (isspace((*rawp)[0]))
/*
 * Allocate a disasm_line (plus @privsize bytes of caller-private room),
 * copy the text and, for real instruction lines, parse and classify it.
 * NOTE(review): the offset check and allocation-failure cleanup lines
 * were lost in extraction.
 */
809 static struct disasm_line *disasm_line__new(s64 offset, char *line,
810 size_t privsize, int line_nr,
814 struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
818 dl->line = strdup(line);
819 dl->line_nr = line_nr;
820 if (dl->line == NULL)
/* only lines with a real offset are instructions worth parsing */
824 if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
827 disasm_line__init_ins(dl, arch, map);
840 void disasm_line__free(struct disasm_line *dl)
843 if (dl->ins.ops && dl->ins.ops->free)
844 dl->ins.ops->free(&dl->ops);
846 ins__delete(&dl->ops);
847 free((void *)dl->ins.name);
852 int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
854 if (raw || !dl->ins.ops)
855 return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);
857 return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
860 static void disasm__add(struct list_head *head, struct disasm_line *line)
862 list_add_tail(&line->node, head);
865 struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
867 list_for_each_entry_continue(pos, head, node)
868 if (pos->offset >= 0)
/*
 * Sum the event's samples over [offset, end) and return the percentage
 * of the symbol's total. When per-source-line records exist they are
 * used (and *path is set to the srcline); otherwise the raw address
 * histogram is summed.
 * NOTE(review): loop closings and the return were lost in extraction.
 */
874 double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
875 s64 end, const char **path, u64 *nr_samples)
877 struct source_line *src_line = notes->src->lines;
878 double percent = 0.0;
/* source_line is variable-sized: one sample slot per event */
882 size_t sizeof_src_line = sizeof(*src_line) +
883 sizeof(src_line->samples) * (src_line->nr_pcnt - 1);
885 while (offset < end) {
886 src_line = (void *)notes->src->lines +
887 (sizeof_src_line * offset);
890 *path = src_line->path;
892 percent += src_line->samples[evidx].percent;
893 *nr_samples += src_line->samples[evidx].nr;
/* no srcline info: fall back to summing the addr histogram */
897 struct sym_hist *h = annotation__histogram(notes, evidx);
898 unsigned int hits = 0;
901 hits += h->addr[offset++];
905 percent = 100.0 * hits / h->sum;
912 static const char *annotate__address_color(struct block_range *br)
914 double cov = block_range__coverage(br);
917 /* mark red for >75% coverage */
919 return PERF_COLOR_RED;
921 /* mark dull for <1% coverage */
923 return PERF_COLOR_NORMAL;
926 return PERF_COLOR_MAGENTA;
929 static const char *annotate__asm_color(struct block_range *br)
931 double cov = block_range__coverage(br);
934 /* mark dull for <1% coverage */
936 return PERF_COLOR_NORMAL;
939 return PERF_COLOR_BLUE;
/*
 * After printing an instruction, append branch statistics: the coverage
 * joining at a branch target ("+p%") and, at a branch source, the
 * coverage leaving plus its prediction ratio ("-p% (p:q%)").
 * NOTE(review): the comment-leader printing and closing braces were lost
 * in extraction.
 */
942 static void annotate__branch_printf(struct block_range *br, u64 addr)
944 bool emit_comment = true;
950 if (br->is_target && br->start == addr) {
951 struct block_range *branch = br;
955 * Find matching branch to our target.
957 while (!branch->is_branch)
958 branch = block_range__next(branch);
960 p = 100 *(double)br->entry / branch->coverage;
/* only print the comment leader once per line */
964 emit_comment = false;
969 * The percentage of coverage joined at this target in relation
970 * to the next branch.
972 printf(" +%.2f%%", p);
976 if (br->is_branch && br->end == addr) {
977 double p = 100*(double)br->taken / br->coverage;
981 emit_comment = false;
986 * The percentage of coverage leaving at this branch, and
987 * its prediction ratio.
989 printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
/*
 * Print one annotated line to stdout. Instruction lines (offset != -1)
 * get per-event percentage columns, colored by heat, the address and the
 * asm text plus branch stats; filler lines are printed indented. @queue
 * lets deferred (below-threshold) lines be flushed just before a hot one.
 * Returns negative to skip/stop, >=0 on success (per callers' use).
 * NOTE(review): many closing braces, returns and the "¬es" tokens
 * (mojibake for "&notes") were corrupted/lost in extraction — restore
 * from upstream before compiling.
 */
995 static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
996 struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
997 int max_lines, struct disasm_line *queue)
/* remember last srcline/color to avoid printing duplicates */
999 static const char *prev_line;
1000 static const char *prev_color;
1002 if (dl->offset != -1) {
1003 const char *path = NULL;
1005 double percent, max_percent = 0.0;
1006 double *ppercents = &percent;
1007 u64 *psamples = &nr_samples;
1008 int i, nr_percent = 1;
1010 struct annotation *notes = symbol__annotation(sym);
1011 s64 offset = dl->offset;
1012 const u64 addr = start + offset;
1013 struct disasm_line *next;
1014 struct block_range *br;
1016 next = disasm__get_next_ip_line(¬es->src->source, dl);
/* group events: one percentage column per member */
1018 if (perf_evsel__is_group_event(evsel)) {
1019 nr_percent = evsel->nr_members;
1020 ppercents = calloc(nr_percent, sizeof(double));
1021 psamples = calloc(nr_percent, sizeof(u64));
1022 if (ppercents == NULL || psamples == NULL) {
1027 for (i = 0; i < nr_percent; i++) {
1028 percent = disasm__calc_percent(notes,
1029 notes->src->lines ? i : evsel->idx + i,
1031 next ? next->offset : (s64) len,
1032 &path, &nr_samples);
1034 ppercents[i] = percent;
1035 psamples[i] = nr_samples;
1036 if (percent > max_percent)
1037 max_percent = percent;
1040 if (max_percent < min_pcnt)
1043 if (max_lines && printed >= max_lines)
/* flush queued cold lines before printing this hot one */
1046 if (queue != NULL) {
1047 list_for_each_entry_from(queue, ¬es->src->source, node) {
1050 disasm_line__print(queue, sym, start, evsel, len,
1055 color = get_percent_color(max_percent);
1058 * Also color the filename and line if needed, with
1059 * the same color than the percentage. Don't print it
1060 * twice for close colored addr with the same filename:line
1063 if (!prev_line || strcmp(prev_line, path)
1064 || color != prev_color) {
1065 color_fprintf(stdout, color, " %s", path);
1071 for (i = 0; i < nr_percent; i++) {
1072 percent = ppercents[i];
1073 nr_samples = psamples[i];
1074 color = get_percent_color(percent);
1076 if (symbol_conf.show_total_period)
1077 color_fprintf(stdout, color, " %7" PRIu64,
1080 color_fprintf(stdout, color, " %7.2f", percent);
1085 br = block_range__find(addr);
1086 color_fprintf(stdout, annotate__address_color(br), " %" PRIx64 ":", addr);
1087 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
1088 annotate__branch_printf(br, addr);
/* free the per-member arrays if they were heap allocated */
1091 if (ppercents != &percent)
1094 if (psamples != &nr_samples)
1097 } else if (max_lines && printed >= max_lines)
1105 if (perf_evsel__is_group_event(evsel))
1106 width *= evsel->nr_members;
1109 printf(" %*s:\n", width, " ");
1111 printf(" %*s: %s\n", width, " ", dl->line);
1118 * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
1119 * which looks like following
1121 * 0000000000415500 <_init>:
1122 * 415500: sub $0x8,%rsp
1123 * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
1124 * 41550b: test %rax,%rax
1125 * 41550e: je 415515 <_init+0x15>
1126 * 415510: callq 416e70 <__gmon_start__@plt>
1127 * 415515: add $0x8,%rsp
1130 * it will be parsed and saved into struct disasm_line as
1131 * <offset> <name> <ops.raw>
1133 * The offset will be a relative offset from the start of the symbol and -1
1134 * means that it's not a disassembly line so should be treated differently.
1135 * The ops.raw part will be parsed further according to type of the instruction.
/*
 * Read one line of objdump output from @file, classify it (srcline
 * marker, instruction with "addr:" prefix, or filler), turn it into a
 * disasm_line and append it to the symbol's source list.
 * NOTE(review): several checks, frees and returns were lost in
 * extraction; "¬es" is mojibake for "&notes".
 */
1137 static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
1139 FILE *file, size_t privsize,
1142 struct annotation *notes = symbol__annotation(sym);
1143 struct disasm_line *dl;
1144 char *line = NULL, *parsed_line, *tmp, *tmp2, *c;
1146 s64 line_ip, offset = -1;
1147 regmatch_t match[2];
1149 if (getline(&line, &line_len, file) < 0)
/* strip trailing whitespace */
1155 while (line_len != 0 && isspace(line[line_len - 1]))
1156 line[--line_len] = '\0';
1158 c = strchr(line, '\n');
1165 /* /filename:linenr ? Save line number and ignore. */
1166 if (regexec(&file_lineno, line, 2, match, 0) == 0) {
1167 *line_nr = atoi(line + match[1].rm_so);
1172 * Strip leading spaces:
1183 * Parse hexa addresses followed by ':'
1185 line_ip = strtoull(tmp, &tmp2, 16);
1186 if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
1190 if (line_ip != -1) {
1191 u64 start = map__rip_2objdump(map, sym->start),
1192 end = map__rip_2objdump(map, sym->end);
/* offset is relative to the symbol start, in objdump's address space */
1194 offset = line_ip - start;
1195 if ((u64)line_ip < start || (u64)line_ip >= end)
1198 parsed_line = tmp2 + 1;
1201 dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
/* UINT64_MAX flags a jump with unknown offset: compute it now */
1208 if (dl->ops.target.offset == UINT64_MAX)
1209 dl->ops.target.offset = dl->ops.target.addr -
1210 map__rip_2objdump(map, sym->start);
1212 /* kcore has no symbols, so add the call target name */
1213 if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
1214 struct addr_map_symbol target = {
1216 .addr = dl->ops.target.addr,
1219 if (!map_groups__find_ams(&target) &&
1220 target.sym->start == target.al_addr)
1221 dl->ops.target.name = strdup(target.sym->name);
1224 disasm__add(¬es->src->source, dl);
1229 static __attribute__((constructor)) void symbol__init_regexpr(void)
1231 regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
1234 static void delete_last_nop(struct symbol *sym)
1236 struct annotation *notes = symbol__annotation(sym);
1237 struct list_head *list = ¬es->src->source;
1238 struct disasm_line *dl;
1240 while (!list_empty(list)) {
1241 dl = list_entry(list->prev, struct disasm_line, node);
1244 if (dl->ins.ops != &nop_ops)
1247 if (!strstr(dl->line, " nop ") &&
1248 !strstr(dl->line, " nopl ") &&
1249 !strstr(dl->line, " nopw "))
1253 list_del(&dl->node);
1254 disasm_line__free(dl);
/*
 * Format a human-readable message for a disassembly error code into
 * @buf: plain errno strings for system errors, a detailed hint for the
 * missing-vmlinux case.
 * NOTE(review): the switch scaffolding and returns were lost in
 * extraction.
 */
1258 int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
1259 int errnum, char *buf, size_t buflen)
1261 struct dso *dso = map->dso;
1263 BUG_ON(buflen == 0);
/* plain errno: delegate to the thread-safe strerror */
1266 str_error_r(errnum, buf, buflen);
1271 case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
1272 char bf[SBUILD_ID_SIZE + 15] = " with build id ";
1273 char *build_id_msg = NULL;
1275 if (dso->has_build_id) {
1276 build_id__sprintf(dso->build_id,
1277 sizeof(dso->build_id), bf + 15);
1280 scnprintf(buf, buflen,
1281 "No vmlinux file%s\nwas found in the path.\n\n"
1282 "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
1284 " perf buildid-cache -vu vmlinux\n\n"
1286 " --vmlinux vmlinux\n", build_id_msg ?: "");
1290 scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
/*
 * Pick the file objdump should disassemble for @dso: the build-id cache
 * entry when available and valid, otherwise the DSO's long_name under
 * the symfs prefix. kallsyms without kcore cannot be disassembled at all.
 * NOTE(review): returns and some braces were lost in extraction.
 */
1297 static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
1299 char linkname[PATH_MAX];
1300 char *build_id_filename;
1302 if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
1303 !dso__is_kcore(dso))
1304 return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
1306 build_id_filename = dso__build_id_filename(dso, NULL, 0);
1307 if (build_id_filename) {
1308 __symbol__join_symfs(filename, filename_size, build_id_filename);
1309 free(build_id_filename);
1311 if (dso->has_build_id)
/* validate the cache entry: readable, and not a kallsyms copy */
1316 if (dso__is_kcore(dso) ||
1317 readlink(filename, linkname, sizeof(linkname)) < 0 ||
1318 strstr(linkname, DSO__NAME_KALLSYMS) ||
1319 access(filename, R_OK)) {
1322 * If we don't have build-ids or the build-id file isn't in the
1323 * cache, or is just a kallsyms file, well, lets hope that this
1324 * DSO is the same as when 'perf record' ran.
1326 __symbol__join_symfs(filename, filename_size, dso->long_name);
/*
 * Map a user-supplied arch name to perf's normalized form; NULL means
 * "annotate for the machine we are running on" (resolved via uname()).
 * Returns NULL when the local machine name cannot be determined.
 */
static const char *annotate__norm_arch(const char *arch_name)
{
	struct utsname uts;

	if (!arch_name) { /* Assume we are annotating locally. */
		if (uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	}
	return normalize_arch((char *)arch_name);
}
/*
 * Disassemble one symbol: resolve the file to objdump (build-id cache,
 * kcore extract or decompressed kmod), run objdump over the symbol's
 * address range through a pipe, and parse each output line into the
 * symbol's annotation. Returns 0 on success, a negative error otherwise.
 * NOTE(review): error labels, cleanup ordering and several returns were
 * lost in extraction — restore from upstream before compiling.
 */
1344 int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize)
1346 struct dso *dso = map->dso;
1347 char command[PATH_MAX * 2];
1348 struct arch *arch = NULL;
1350 char symfs_filename[PATH_MAX];
1351 struct kcore_extract kce;
1352 bool delete_extract = false;
1357 int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
1362 arch_name = annotate__norm_arch(arch_name);
1366 arch = arch__find(arch_name);
1371 err = arch->init(arch);
1373 pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
1378 pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
1379 symfs_filename, sym->name, map->unmap_ip(map, sym->start),
1380 map->unmap_ip(map, sym->end));
1382 pr_debug("annotating [%p] %30s : [%p] %30s\n",
1383 dso, dso->long_name, sym, sym->name);
/* kcore: extract just this symbol's bytes into a temp file */
1385 if (dso__is_kcore(dso)) {
1386 kce.kcore_filename = symfs_filename;
1387 kce.addr = map__rip_2objdump(map, sym->start);
1388 kce.offs = sym->start;
1389 kce.len = sym->end - sym->start;
1390 if (!kcore_extract__create(&kce)) {
1391 delete_extract = true;
1392 strlcpy(symfs_filename, kce.extract_filename,
1393 sizeof(symfs_filename));
/* compressed kernel module: decompress to a temp file first */
1395 } else if (dso__needs_decompress(dso)) {
1401 if (kmod_path__parse_ext(&m, symfs_filename))
1404 snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
1412 ret = decompress_to_file(m.ext, symfs_filename, fd);
1415 pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
1423 strcpy(symfs_filename, tmp);
1426 snprintf(command, sizeof(command),
1427 "%s %s%s --start-address=0x%016" PRIx64
1428 " --stop-address=0x%016" PRIx64
1429 " -l -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
1430 objdump_path ? objdump_path : "objdump",
1431 disassembler_style ? "-M " : "",
1432 disassembler_style ? disassembler_style : "",
1433 map__rip_2objdump(map, sym->start),
1434 map__rip_2objdump(map, sym->end),
1435 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
1436 symbol_conf.annotate_src ? "-S" : "",
1437 symfs_filename, symfs_filename);
1439 pr_debug("Executing: %s\n", command);
1442 if (pipe(stdout_fd) < 0) {
1443 pr_err("Failure creating the pipe to run %s\n", command);
1444 goto out_remove_tmp;
1449 pr_err("Failure forking to run %s\n", command);
1450 goto out_close_stdout;
/* child: wire pipe write end to stdout and exec the shell command */
1454 close(stdout_fd[0]);
1455 dup2(stdout_fd[1], 1);
1456 close(stdout_fd[1]);
1457 execl("/bin/sh", "sh", "-c", command, NULL);
/* parent: read objdump's output from the pipe */
1462 close(stdout_fd[1]);
1464 file = fdopen(stdout_fd[0], "r");
1466 pr_err("Failure creating FILE stream for %s\n", command);
1468 * If we were using debug info should retry with
1471 goto out_remove_tmp;
1475 while (!feof(file)) {
1476 if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
1483 pr_err("No output from %s\n", command);
1486 * kallsyms does not have symbol sizes so there may a nop at the end.
1489 if (dso__is_kcore(dso))
1490 delete_last_nop(sym);
1495 close(stdout_fd[0]);
1497 if (dso__needs_decompress(dso))
1498 unlink(symfs_filename);
1501 kcore_extract__delete(&kce);
1506 close(stdout_fd[1]);
1507 goto out_remove_tmp;
/*
 * Insert a source_line into an rbtree keyed by path. When the path is
 * already present, its per-event percent_sums are accumulated into the
 * existing node instead of inserting a duplicate.
 * NOTE(review): the rb_left branch and early return were lost in
 * extraction.
 */
1510 static void insert_source_line(struct rb_root *root, struct source_line *src_line)
1512 struct source_line *iter;
1513 struct rb_node **p = &root->rb_node;
1514 struct rb_node *parent = NULL;
1517 while (*p != NULL) {
1519 iter = rb_entry(parent, struct source_line, node);
1521 ret = strcmp(iter->path, src_line->path);
/* same srcline: merge the percentages, don't insert */
1523 for (i = 0; i < src_line->nr_pcnt; i++)
1524 iter->samples[i].percent_sum += src_line->samples[i].percent;
1531 p = &(*p)->rb_right;
/* first sighting of this path: seed the sums */
1534 for (i = 0; i < src_line->nr_pcnt; i++)
1535 src_line->samples[i].percent_sum = src_line->samples[i].percent;
1537 rb_link_node(&src_line->node, parent, p);
1538 rb_insert_color(&src_line->node, root);
1541 static int cmp_source_line(struct source_line *a, struct source_line *b)
1545 for (i = 0; i < a->nr_pcnt; i++) {
1546 if (a->samples[i].percent_sum == b->samples[i].percent_sum)
1548 return a->samples[i].percent_sum > b->samples[i].percent_sum;
1554 static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
1556 struct source_line *iter;
1557 struct rb_node **p = &root->rb_node;
1558 struct rb_node *parent = NULL;
1560 while (*p != NULL) {
1562 iter = rb_entry(parent, struct source_line, node);
1564 if (cmp_source_line(src_line, iter))
1567 p = &(*p)->rb_right;
1570 rb_link_node(&src_line->node, parent, p);
1571 rb_insert_color(&src_line->node, root);
1574 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1576 struct source_line *src_line;
1577 struct rb_node *node;
1579 node = rb_first(src_root);
1581 struct rb_node *next;
1583 src_line = rb_entry(node, struct source_line, node);
1584 next = rb_next(node);
1585 rb_erase(node, src_root);
1587 __resort_source_line(dest_root, src_line);
1592 static void symbol__free_source_line(struct symbol *sym, int len)
1594 struct annotation *notes = symbol__annotation(sym);
1595 struct source_line *src_line = notes->src->lines;
1596 size_t sizeof_src_line;
1599 sizeof_src_line = sizeof(*src_line) +
1600 (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));
1602 for (i = 0; i < len; i++) {
1603 free_srcline(src_line->path);
1604 src_line = (void *)src_line + sizeof_src_line;
1607 zfree(¬es->src->lines);
/* Get the filename:line for the colored entries */
/*
 * Build the per-source-line percent table for @sym and collect the
 * "hot" lines (percent_max > 0.5) into @root, sorted by percent_sum.
 * Records are variable-sized: one samples[] slot per group member, so
 * a single slab of @len records is calloc'd and walked with byte
 * arithmetic.  Returns 0 on success (return lines elided here).
 */
1611 static int symbol__get_source_line(struct symbol *sym, struct map *map,
1612 struct perf_evsel *evsel,
1613 struct rb_root *root, int len)
1617 int evidx = evsel->idx;
1618 struct source_line *src_line;
1619 struct annotation *notes = symbol__annotation(sym);
1620 struct sym_hist *h = annotation__histogram(notes, evidx);
1621 struct rb_root tmp_root = RB_ROOT;
1624 size_t sizeof_src_line = sizeof(struct source_line);
/* for group events, size each record for all members' samples */
1626 if (perf_evsel__is_group_event(evsel)) {
1627 for (i = 1; i < evsel->nr_members; i++) {
1628 h = annotation__histogram(notes, evidx + i);
1631 nr_pcnt = evsel->nr_members;
1632 sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
1638 src_line = notes->src->lines = calloc(len, sizeof_src_line);
1639 if (!notes->src->lines)
1642 start = map__rip_2objdump(map, sym->start);
1644 for (i = 0; i < len; i++) {
1646 double percent_max = 0.0;
1648 src_line->nr_pcnt = nr_pcnt;
1650 for (k = 0; k < nr_pcnt; k++) {
1651 h = annotation__histogram(notes, evidx + k);
/* NOTE(review): divides by h->sum with no visible zero check in
 * this excerpt -- confirm a guard exists in the elided lines */
1652 src_line->samples[k].percent = 100.0 * h->addr[i] / h->sum;
1654 if (src_line->samples[k].percent > percent_max)
1655 percent_max = src_line->samples[k].percent;
/* skip cold lines; threshold matches print_summary's cutoff */
1658 if (percent_max <= 0.5)
1662 src_line->path = get_srcline(map->dso, offset, NULL, false);
1663 insert_source_line(&tmp_root, src_line);
/* advance by the variable record size, not sizeof(*src_line) */
1666 src_line = (void *)src_line + sizeof_src_line;
/* re-sort from path order to percent_sum order into caller's root */
1669 resort_source_line(root, &tmp_root);
/*
 * Print the sorted per-source-line summary built by
 * symbol__get_source_line(): one row per hot line, each event's
 * percent_sum colored by magnitude, then the srcline path colored by
 * the row's maximum percent.
 * NOTE(review): the loop construct around the rb-tree walk is elided
 * in this excerpt.
 */
1673 static void print_summary(struct rb_root *root, const char *filename)
1675 struct source_line *src_line;
1676 struct rb_node *node;
1678 printf("\nSorted summary for file %s\n", filename);
1679 printf("----------------------------------------------\n\n");
/* empty tree means nothing crossed the collection threshold */
1681 if (RB_EMPTY_ROOT(root)) {
1682 printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1686 node = rb_first(root);
1688 double percent, percent_max = 0.0;
1693 src_line = rb_entry(node, struct source_line, node);
1694 for (i = 0; i < src_line->nr_pcnt; i++) {
1695 percent = src_line->samples[i].percent_sum;
1696 color = get_percent_color(percent);
1697 color_fprintf(stdout, color, " %7.2f", percent);
1699 if (percent > percent_max)
1700 percent_max = percent;
1703 path = src_line->path;
/* path gets the color of the hottest event in this row */
1704 color = get_percent_color(percent_max);
1705 color_fprintf(stdout, color, " %s\n", path);
1707 node = rb_next(node);
/*
 * Debug dump of @sym's raw histogram for @evsel: one line per offset
 * with a non-zero hit count (address in hex, count in decimal),
 * followed by the histogram total.  BITS_PER_LONG / 2 gives the
 * field width in hex digits for a native-word address.
 */
1711 static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
1713 struct annotation *notes = symbol__annotation(sym);
1714 struct sym_hist *h = annotation__histogram(notes, evsel->idx);
1715 u64 len = symbol__size(sym), offset;
1717 for (offset = 0; offset < len; ++offset)
1718 if (h->addr[offset] != 0)
1719 printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1720 sym->start + offset, h->addr[offset]);
1721 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
/*
 * Print the annotated disassembly of @sym to stdout: a header with
 * the dso/event names and sample total, then every parsed disasm
 * line filtered by @min_pcnt / @max_lines, optionally keeping
 * @context lines of leading context around hot lines via the @queue
 * sliding window.  Returns the number of lines printed (return path
 * elided in this excerpt).
 *
 * NOTE(review): several lines (error paths, switch cases, loop
 * closes) are elided here; only the mojibake on the list head was
 * changed ("&not" in "&notes" had been mangled to U+00AC).
 */
1724 int symbol__annotate_printf(struct symbol *sym, struct map *map,
1725 struct perf_evsel *evsel, bool full_paths,
1726 int min_pcnt, int max_lines, int context)
1728 struct dso *dso = map->dso;
1730 const char *d_filename;
1731 const char *evsel_name = perf_evsel__name(evsel);
1732 struct annotation *notes = symbol__annotation(sym);
1733 struct sym_hist *h = annotation__histogram(notes, evsel->idx);
1734 struct disasm_line *pos, *queue = NULL;
1735 u64 start = map__rip_2objdump(map, sym->start);
1736 int printed = 2, queue_len = 0;
1740 int graph_dotted_len;
1742 filename = strdup(dso->long_name);
1747 d_filename = filename;
/* shorten to basename unless the caller asked for full paths */
1749 d_filename = basename(filename);
1751 len = symbol__size(sym);
/* one percent column per group member */
1753 if (perf_evsel__is_group_event(evsel))
1754 width *= evsel->nr_members;
1756 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
1757 width, width, "Percent", d_filename, evsel_name, h->sum);
/* underline sized to match the header just printed */
1759 printf("%-*.*s----\n",
1760 graph_dotted_len, graph_dotted_len, graph_dotted_line);
1763 symbol__annotate_hits(sym, evsel);
/* fixed mojibake: was "¬es->src->source" */
1765 list_for_each_entry(pos, &notes->src->source, node) {
/* start a new context window at the current line */
1766 if (context && queue == NULL) {
1771 switch (disasm_line__print(pos, sym, start, evsel, len,
1772 min_pcnt, printed, max_lines,
/* hot line printed: account for its flushed context lines too */
1777 printed += queue_len;
1783 /* filtered by max_lines */
1789 * Filtered by min_pcnt or non IP lines when
/* window full: slide it forward by dropping the oldest entry */
1794 if (queue_len == context)
1795 queue = list_entry(queue->node.next, typeof(*queue), node);
/*
 * Reset @sym's histogram for event @evidx: clears the sum and every
 * per-offset counter in one memset over the whole sym_hist record.
 */
1807 void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
1809 struct annotation *notes = symbol__annotation(sym);
1810 struct sym_hist *h = annotation__histogram(notes, evidx);
1812 memset(h, 0, notes->src->sizeof_sym_hist);
/*
 * Age @sym's histogram for event @evidx: scale every per-offset
 * counter by 7/8 (integer decay used by live-mode annotation) and
 * rebuild h->sum from the decayed counts.
 * NOTE(review): the visible lines do not show h->sum being zeroed
 * before the accumulation loop -- presumably done in an elided line;
 * confirm against the full file.
 */
1815 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
1817 struct annotation *notes = symbol__annotation(sym);
1818 struct sym_hist *h = annotation__histogram(notes, evidx);
1819 int len = symbol__size(sym), offset;
1822 for (offset = 0; offset < len; ++offset) {
1823 h->addr[offset] = h->addr[offset] * 7 / 8;
1824 h->sum += h->addr[offset];
/*
 * Free every disasm_line on @head.  Uses the _safe iterator because
 * each node is unlinked and freed inside the loop.
 */
1828 void disasm__purge(struct list_head *head)
1830 struct disasm_line *pos, *n;
1832 list_for_each_entry_safe(pos, n, head, node) {
1833 list_del(&pos->node);
1834 disasm_line__free(pos);
/*
 * Write one disassembly line to @fp and return the byte count.
 * Lines with offset == -1 are non-instruction text (labels, source)
 * and are emitted verbatim; real instructions print as
 * "<offset> <mnemonic>" padded to 6 columns, then the raw operands
 * when present.
 */
1838 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1842 if (dl->offset == -1)
1843 return fprintf(fp, "%s\n", dl->line);
1845 printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);
1847 if (dl->ops.raw[0] != '\0') {
/* pad mnemonic column to width 6 before the operands */
1848 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1852 return printed + fprintf(fp, "\n");
/*
 * Write every disassembly line on @head to @fp; returns the total
 * number of bytes written.
 */
1855 size_t disasm__fprintf(struct list_head *head, FILE *fp)
1857 struct disasm_line *pos;
1860 list_for_each_entry(pos, head, node)
1861 printed += disasm_line__fprintf(pos, fp);
/*
 * TTY (stdio) annotation entry point: disassemble @sym, optionally
 * compute and print the per-source-line summary (@print_lines), then
 * print the annotated disassembly and release everything.  Returns
 * non-zero when disassembly fails (error value elided here).
 */
1866 int symbol__tty_annotate(struct symbol *sym, struct map *map,
1867 struct perf_evsel *evsel, bool print_lines,
1868 bool full_paths, int min_pcnt, int max_lines)
1870 struct dso *dso = map->dso;
1871 struct rb_root source_line = RB_ROOT;
1874 if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0) < 0)
1877 len = symbol__size(sym);
/* global flag consumed by get_srcline() path formatting */
1880 srcline_full_filename = full_paths;
1881 symbol__get_source_line(sym, map, evsel, &source_line, len);
1882 print_summary(&source_line, dso->long_name);
1885 symbol__annotate_printf(sym, map, evsel, full_paths,
1886 min_pcnt, max_lines, 0);
/* teardown: srcline table first, then the parsed disasm lines */
1888 symbol__free_source_line(sym, len);
1890 disasm__purge(&symbol__annotation(sym)->src->source);
1895 bool ui__has_annotation(void)
1897 return use_browser == 1 && perf_hpp_list.sym;