11 #include <sys/param.h>
23 #include <sys/utsname.h>
25 #ifndef NT_GNU_BUILD_ID
26 #define NT_GNU_BUILD_ID 3
/*
 * NOTE(review): this file is an abridged excerpt — many lines (braces,
 * conditions, labels) are elided throughout; comments below describe only
 * what the visible code shows.
 *
 * Forward declarations for helpers defined later in this file.
 */
static bool dso__build_id_equal(const struct dso *self, u8 *build_id);
static int elf_read_build_id(Elf *elf, void *bf, size_t size);
static void dsos__add(struct list_head *head, struct dso *dso);
static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
static int dso__load_kernel_sym(struct dso *self, struct map *map,
				symbol_filter_t filter);
static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
				      symbol_filter_t filter);
/*
 * Candidate vmlinux image paths; consumed by dso__load_vmlinux_path().
 * Presumably populated by an init routine not visible in this excerpt.
 */
static int vmlinux_path__nr_entries;
static char **vmlinux_path;
/*
 * Global symbol-handling configuration. Fields not listed here start
 * zeroed; other defaults may be elided in this excerpt.
 */
struct symbol_conf symbol_conf = {
	.exclude_other = true,
	.try_vmlinux_path = true,
/*
 * Display-name length for a dso: long name or short name — the selecting
 * condition is elided in this excerpt (likely a symbol_conf/verbosity
 * check — TODO confirm against the full file).
 */
int dso__name_len(const struct dso *self)
	return self->long_name_len;

	return self->short_name_len;
/* Per-map-type "symbols loaded" flag, kept as one bit per map_type. */
bool dso__loaded(const struct dso *self, enum map_type type)
	return self->loaded & (1 << type);

/* Per-map-type "symbol_names tree built" flag, one bit per map_type. */
bool dso__sorted_by_name(const struct dso *self, enum map_type type)
	return self->sorted_by_name & (1 << type);

/* Mark this map_type's by-name tree as built. */
static void dso__set_sorted_by_name(struct dso *self, enum map_type type)
	self->sorted_by_name |= (1 << type);
/*
 * Does a kallsyms/nm one-letter symbol type belong to the given map type?
 * Visible cases: functions (T/W) and data (D/d); the switch on map_type
 * is elided in this excerpt.
 */
bool symbol_type__is_a(char symbol_type, enum map_type map_type)
	return symbol_type == 'T' || symbol_type == 'W';
	return symbol_type == 'D' || symbol_type == 'd';
/*
 * Walk the address-sorted symbol tree and give zero-sized symbols an end
 * address: each one ends just before the next symbol starts; the last one
 * (no successor) gets its start rounded up to the next 4K boundary as a
 * guess.
 */
static void symbols__fixup_end(struct rb_root *self)
	struct rb_node *nd, *prevnd = rb_first(self);
	struct symbol *curr, *prev;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		curr = rb_entry(nd, struct symbol, rb_node);
		/* zero-sized symbol: end it right before its successor */
		if (prev->end == prev->start)
			prev->end = curr->start - 1;

	/* last symbol: no successor, guess a page-aligned end */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096);
/*
 * Same idea as symbols__fixup_end(), but for the maps of one map_type:
 * each map ends just before the next map starts.
 */
static void __map_groups__fixup_end(struct map_groups *self, enum map_type type)
	struct map *prev, *curr;
	struct rb_node *nd, *prevnd = rb_first(&self->maps[type]);

	curr = rb_entry(prevnd, struct map, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		curr = rb_entry(nd, struct map, rb_node);
		prev->end = curr->start - 1;

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
/* Fix up map end addresses for every map type. */
static void map_groups__fixup_end(struct map_groups *self)
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(self, i);
/*
 * Allocate a symbol. The allocation is priv_size + struct symbol + name,
 * in one block: callers' private data lives *before* the struct symbol,
 * so the returned pointer is advanced past the private area.
 * A zero len means "size unknown"; end is then set equal to start and
 * fixed up later by symbols__fixup_end().
 */
static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
	size_t namelen = strlen(name) + 1;
	struct symbol *self = calloc(1, (symbol_conf.priv_size +
					 sizeof(*self) + namelen));

	/* skip over the caller-private area that precedes the symbol */
	if (symbol_conf.priv_size)
		self = ((void *)self) + symbol_conf.priv_size;

	self->end = len ? start + len - 1 : start;
	self->binding = binding;
	self->namelen = namelen - 1;

	pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);

	memcpy(self->name, name, namelen);
/* Free a symbol: step back over the private area to the real malloc'd base. */
void symbol__delete(struct symbol *self)
	free(((void *)self) - symbol_conf.priv_size);
/* Print one symbol as "start-end binding name"; g/l/w = global/local/weak. */
static size_t symbol__fprintf(struct symbol *self, FILE *fp)
	return fprintf(fp, " %llx-%llx %c %s\n",
		       self->start, self->end,
		       self->binding == STB_GLOBAL ? 'g' :
		       self->binding == STB_LOCAL  ? 'l' : 'w',
/* Set the dso's long name; caller keeps ownership rules (see lname_alloc). */
void dso__set_long_name(struct dso *self, char *name)
	self->long_name = name;
	self->long_name_len = strlen(name);

/* Set the dso's short name (not copied; see sname_alloc for ownership). */
static void dso__set_short_name(struct dso *self, const char *name)
	self->short_name = name;
	self->short_name_len = strlen(name);

/* Use the basename of the long name as the short name. */
static void dso__set_basename(struct dso *self)
	dso__set_short_name(self, basename(self->long_name));
/*
 * Allocate and initialize a dso; the name is stored inline at the end of
 * the struct and used for both long and short name initially.
 */
struct dso *dso__new(const char *name)
	struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1);

	strcpy(self->name, name);
	dso__set_long_name(self, self->name);
	dso__set_short_name(self, self->name);
	/* empty symbol trees for every map type */
	for (i = 0; i < MAP__NR_TYPES; ++i)
		self->symbols[i] = self->symbol_names[i] = RB_ROOT;
	self->slen_calculated = 0;
	self->origin = DSO__ORIG_NOT_FOUND;
	self->sorted_by_name = 0;
	self->has_build_id = 0;
	self->kernel = DSO_TYPE_USER;
	INIT_LIST_HEAD(&self->node);
/* Erase and free every symbol in the tree. */
static void symbols__delete(struct rb_root *self)
	struct rb_node *next = rb_first(self);

	pos = rb_entry(next, struct symbol, rb_node);
	/* grab the successor before erasing the current node */
	next = rb_next(&pos->rb_node);
	rb_erase(&pos->rb_node, self);
/* Free a dso: all per-type symbol trees, then any heap-owned names. */
void dso__delete(struct dso *self)
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&self->symbols[i]);
	/* names are freed only when the *_alloc flags say we own them */
	if (self->sname_alloc)
		free((char *)self->short_name);
	if (self->lname_alloc)
		free(self->long_name);
/* Record the dso's build-id and mark it present. */
void dso__set_build_id(struct dso *self, void *build_id)
	memcpy(self->build_id, build_id, sizeof(self->build_id));
	self->has_build_id = 1;
/* Insert a symbol into the rbtree keyed by start address. */
static void symbols__insert(struct rb_root *self, struct symbol *sym)
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;

	s = rb_entry(parent, struct symbol, rb_node);

	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color(&sym->rb_node, self);
/*
 * Binary-search the address tree for the symbol whose [start,end] range
 * contains ip (ranges are inclusive of end).
 */
static struct symbol *symbols__find(struct rb_root *self, u64 ip)
	struct symbol *s = rb_entry(n, struct symbol, rb_node);

	else if (ip > s->end)
/*
 * Wrapper placed immediately *before* a struct symbol so the same symbol
 * can also live in a name-sorted rbtree (see symbols__insert_by_name).
 */
struct symbol_name_rb_node {
	struct rb_node rb_node;
/*
 * Insert into the name-sorted tree. The symbol_name_rb_node header sits
 * sizeof(rb_node) bytes before the symbol itself, hence the pointer
 * arithmetic below.
 */
static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;

	s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
	if (strcmp(sym->name, s->sym.name) < 0)

	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color(&symn->rb_node, self);
/* Build the name-sorted tree from the address-sorted one. */
static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source)
	for (nd = rb_first(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		symbols__insert_by_name(self, pos);
/* Binary-search the name-sorted tree for an exact name match. */
static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name)
	struct symbol_name_rb_node *s;

	s = rb_entry(n, struct symbol_name_rb_node, rb_node);
	cmp = strcmp(name, s->sym.name);
/* Look up the symbol containing addr in this dso's per-type address tree. */
struct symbol *dso__find_symbol(struct dso *self,
				enum map_type type, u64 addr)
	return symbols__find(&self->symbols[type], addr);

/* Look up a symbol by exact name in the per-type name tree. */
struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
	return symbols__find_by_name(&self->symbol_names[type], name);

/* Build the name-sorted tree for this map type and flag it as built. */
void dso__sort_by_name(struct dso *self, enum map_type type)
	dso__set_sorted_by_name(self, type);
	return symbols__sort_by_name(&self->symbol_names[type],
				     &self->symbols[type]);
/*
 * Render a raw build-id as lowercase hex into bf; bf must hold at least
 * 2*len + 1 bytes.
 */
int build_id__sprintf(const u8 *self, int len, char *bf)
	const u8 *raw = self;

	for (i = 0; i < len; ++i) {
		sprintf(bid, "%02x", *raw);
/* Print the dso's build-id as hex to fp. */
size_t dso__fprintf_buildid(struct dso *self, FILE *fp)
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
/* Print all symbol names of one map type, in name order, one per line. */
size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp)
	struct symbol_name_rb_node *pos;

	for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
		fprintf(fp, "%s\n", pos->sym.name);
/* Print a dso header (names, map type, loaded state, build-id) then all
 * its symbols for the given map type; returns total bytes written. */
size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
	size_t ret = fprintf(fp, "dso: %s (", self->short_name);

	/* only print the long name when it differs from the short one */
	if (self->short_name != self->long_name)
		ret += fprintf(fp, "%s, ", self->long_name);
	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
		       self->loaded ? "" : "NOT ");
	ret += dso__fprintf_buildid(self, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		ret += symbol__fprintf(pos, fp);
/*
 * Parse a kallsyms-format file ("ADDR TYPE NAME" per line), invoking
 * process_symbol(arg, name, type, start) for each entry.
 */
int kallsyms__parse(const char *filename, void *arg,
		    int (*process_symbol)(void *arg, const char *name,
					  char type, u64 start))
	FILE *file = fopen(filename, "r");

	while (!feof(file)) {
		line_len = getline(&line, &n, file);
		if (line_len < 0 || !line)

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		/* need at least the type char and one name char past the addr */
		if (len + 2 >= line_len)

		symbol_type = toupper(line[len]);
		symbol_name = line + len + 2;

		err = process_symbol(arg, symbol_name, symbol_type, start);
/* Context passed through kallsyms__parse() to the per-symbol callback. */
struct process_kallsyms_args {

/* kallsyms uses uppercase for global symbols, lowercase for local ones. */
static u8 kallsyms2elf_type(char type)
	return isupper(type) ? STB_GLOBAL : STB_LOCAL;
/*
 * kallsyms__parse() callback: create a symbol for each kallsyms line whose
 * type matches this map's map_type and insert it unfiltered; filtering
 * happens later in dso__split_kallsyms().
 */
static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
	struct process_kallsyms_args *a = arg;
	struct rb_root *root = &a->dso->symbols[a->map->type];

	if (!symbol_type__is_a(type, a->map->type))

	/*
	 * Will fix up the end later, when we have all symbols sorted.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_type(type), name);

	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	symbols__insert(root, sym);
/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *self, const char *filename,
	struct process_kallsyms_args args = { .map = map, .dso = self, };
	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names vmlinux have.
 */
static int dso__split_kallsyms(struct dso *self, struct map *map,
			       symbol_filter_t filter)
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct map *curr_map = map;
	struct rb_root *root = &self->symbols[map->type];
	struct rb_node *next = rb_first(root);
	int kernel_range = 0;

	pos = rb_entry(next, struct symbol, rb_node);
	next = rb_next(&pos->rb_node);

	/* kallsyms marks module symbols with a "\t[module]" suffix */
	module = strchr(pos->name, '\t');

	if (!symbol_conf.use_modules)

	/* symbol belongs to a different module than curr_map's dso? */
	if (strcmp(curr_map->dso->short_name, module)) {
		if (curr_map != map &&
		    self->kernel == DSO_TYPE_GUEST_KERNEL &&
		    machine__is_default_guest(machine)) {
			/*
			 * We assume all symbols of a module are
			 * continuous in * kallsyms, so curr_map
			 * points to a module and all its
			 * symbols are in its kmap. Mark it as
			 * loaded.
			 */
			dso__set_loaded(curr_map->dso,

		curr_map = map_groups__find_by_name(kmaps,
		if (curr_map == NULL) {
			pr_debug("%s/proc/{kallsyms,modules} "
				 "inconsistency while looking "
				 "for \"%s\" module!\n",
				 machine->root_dir, module);

	if (curr_map->dso->loaded &&
	    !machine__is_default_guest(machine))

	/*
	 * So that we look just like we get from .ko files,
	 * i.e. not prelinked, relative to map->start.
	 */
	pos->start = curr_map->map_ip(curr_map, pos->start);
	pos->end = curr_map->map_ip(curr_map, pos->end);
	} else if (curr_map != map) {
		/* non-module symbol after we left the main map: start a
		 * new synthetic [kernel].N / [guest.kernel].N dso */
		char dso_name[PATH_MAX];

		if (self->kernel == DSO_TYPE_GUEST_KERNEL)
			snprintf(dso_name, sizeof(dso_name),
		snprintf(dso_name, sizeof(dso_name),

		dso = dso__new(dso_name);

		dso->kernel = self->kernel;

		curr_map = map__new2(pos->start, dso, map->type);
		if (curr_map == NULL) {

		curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
		map_groups__insert(kmaps, curr_map);

	if (filter && filter(curr_map, pos)) {
discard_symbol:		rb_erase(&pos->rb_node, root);

	/* symbol moved to a split map: re-home it in that dso's tree */
	if (curr_map != map) {
		rb_erase(&pos->rb_node, root);
		symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);

	if (curr_map != map &&
	    self->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso, curr_map->type);
/*
 * Load kernel symbols from a kallsyms file: parse them all, fix up the
 * zero end addresses, tag the origin, then split per module/range.
 */
int dso__load_kallsyms(struct dso *self, const char *filename,
		       struct map *map, symbol_filter_t filter)
	if (dso__load_all_kallsyms(self, filename, map) < 0)

	symbols__fixup_end(&self->symbols[map->type]);
	if (self->kernel == DSO_TYPE_GUEST_KERNEL)
		self->origin = DSO__ORIG_GUEST_KERNEL;
	self->origin = DSO__ORIG_KERNEL;

	return dso__split_kallsyms(self, map, filter);
/*
 * Load a /tmp/perf-<pid>.map style JIT map file: each line is
 * "START SIZE name", all symbols inserted as STB_GLOBAL.
 */
static int dso__load_perf_map(struct dso *self, struct map *map,
			      symbol_filter_t filter)
	file = fopen(self->long_name, "r");

	while (!feof(file)) {
		line_len = getline(&line, &n, file);

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		if (len + 2 >= line_len)

		len += hex2u64(line + len, &size);

		if (len + 2 >= line_len)

		sym = symbol__new(start, size, STB_GLOBAL, line + len);

		goto out_delete_line;

		if (filter && filter(map, sym))

		symbols__insert(&self->symbols[map->type], sym);
/*
 * elf_symtab__for_each_symbol - iterate thru all the symbols
 * @self: struct elf_symtab instance to iterate
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx++, gelf_getsym(syms, idx, &sym))
/* Extract the STT_* type bits from an ELF symbol's st_info. */
static inline uint8_t elf_sym__type(const GElf_Sym *sym)
	return GELF_ST_TYPE(sym->st_info);

/* Defined function symbol? */
static inline int elf_sym__is_function(const GElf_Sym *sym)
	return elf_sym__type(sym) == STT_FUNC &&
	       sym->st_shndx != SHN_UNDEF;

/* Defined data-object symbol? */
static inline bool elf_sym__is_object(const GElf_Sym *sym)
	return elf_sym__type(sym) == STT_OBJECT &&
	       sym->st_shndx != SHN_UNDEF;

/* Untyped label in a real (non-undef, non-absolute) section? */
static inline int elf_sym__is_label(const GElf_Sym *sym)
	return elf_sym__type(sym) == STT_NOTYPE &&
	       sym->st_shndx != SHN_UNDEF &&
	       sym->st_shndx != SHN_ABS;

/* Section name, looked up in the section-header string table. */
static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
	return secstrs->d_buf + shdr->sh_name;

/* Heuristic: any section whose name contains "text" counts as text. */
static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;

/* Heuristic: any section whose name contains "data" counts as data. */
static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;

/* Symbol name, looked up in the symbol string table. */
static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
	return symstrs->d_buf + sym->st_name;
/*
 * Find an ELF section by name; fills *shp with its header and (per the
 * callers) optionally reports its index via the last parameter.
 */
static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
				    GElf_Shdr *shp, const char *name,
	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (!strcmp(name, str)) {
/* Iterate over all GElf_Rel entries of a relocation section. */
#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

/* Iterate over all GElf_Rela entries of a relocation section. */
#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
 * .dynsym or .symtab).
 * And always look at the original dso, not at debuginfo packages, that
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 *
 * Synthesized symbols are named "<dynsym name>@plt" and sized to one PLT
 * entry each; returns the number created (nr) or an error.
 */
static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
				       symbol_filter_t filter)
	uint32_t nr_rel_entries, idx;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	char sympltname[1024];
	int nr = 0, symidx, fd, err = 0;

	fd = open(self->long_name, O_RDONLY);

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);

	if (gelf_getehdr(elf, &ehdr) == NULL)

	scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym,
					 ".dynsym", &dynsym_idx);
	if (scn_dynsym == NULL)

	/* try .rela.plt first, then .rel.plt (section names elided here) */
	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
		if (scn_plt_rel == NULL)

	/* the PLT relocs must reference the .dynsym we found */
	if (shdr_rel_plt.sh_link != dynsym_idx)

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);

	syms = elf_getdata(scn_dynsym, NULL);

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)

	symstrs = elf_getdata(scn_symstrs, NULL);

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);

			if (filter && filter(map, f))

			symbols__insert(&self->symbols[map->type], f);
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);

			if (filter && filter(map, f))

			symbols__insert(&self->symbols[map->type], f);

	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, self->long_name);
/* Does this ELF symbol belong to the given map type? (switch elided) */
static bool elf_sym__is_a(GElf_Sym *self, enum map_type type)
	return elf_sym__is_function(self);
	return elf_sym__is_object(self);

/* Does this ELF section belong to the given map type? (switch elided) */
static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type)
	return elf_sec__is_text(self, secstrs);
	return elf_sec__is_data(self, secstrs);
/* Find the index of the section whose [sh_addr, sh_addr+sh_size) range
 * contains addr. */
static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
/*
 * Core ELF symbol loader: reads .symtab (falling back to .dynsym),
 * verifies the build-id if the dso has one, handles ppc64-style .opd
 * function descriptors, splits kernel/module symbols into per-section
 * maps, optionally adjusts addresses for prelink/ET_EXEC, demangles C++
 * names, and inserts everything into the right dso's symbol tree.
 */
static int dso__load_sym(struct dso *self, struct map *map, const char *name,
			 int fd, symbol_filter_t filter, int kmodule,
	struct kmap *kmap = self->kernel ? map__kmap(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = self;
	Elf_Data *symstrs, *secstrs;
	GElf_Shdr shdr, opdshdr;
	Elf_Data *syms, *opddata = NULL;
	Elf_Scn *sec, *sec_strndx, *opdsec;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	pr_debug("%s: cannot read %s ELF file.\n", __func__, name);

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_debug("%s: cannot get elf header.\n", __func__);

	/* Always reject images with a mismatched build-id: */
	if (self->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id,
				      BUILD_ID_SIZE) != BUILD_ID_SIZE)

		if (!dso__build_id_equal(self, build_id))

	sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
	/* fall back to the dynamic symbol table if there is no .symtab */
	sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL);

	/* .opd holds function descriptors on ppc64-style ABIs */
	opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
	opddata = elf_rawdata(opdsec, NULL);

	syms = elf_getdata(sec, NULL);

	sec = elf_getscn(elf, shdr.sh_link);

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)

	sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
	if (sec_strndx == NULL)

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));
	/* userland dsos need address adjustment for ET_EXEC / prelink */
	if (self->kernel == DSO_TYPE_USER) {
		self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
					elf_section_by_name(elf, &ehdr, &shdr,
							    ".gnu.prelink_undo",
	} else self->adjust_symbols = 0;

	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;

		/* remember the unrelocated address of the reference symbol */
		if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
		    strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;

		if (!is_label && !elf_sym__is_a(&sym, map->type))

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so will confuse the profile
		 */
		if (ehdr.e_machine == EM_ARM) {
			if (!strcmp(elf_name, "$a") ||
			    !strcmp(elf_name, "$d") ||
			    !strcmp(elf_name, "$t"))

		/* resolve .opd function descriptor to the real entry point */
		if (opdsec && sym.st_shndx == opdidx) {
			u32 offset = sym.st_value - opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = *opd;
			sym.st_shndx = elf_addr_to_index(elf, sym.st_value);

		sec = elf_getscn(elf, sym.st_shndx);

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))

		section_name = elf_sec__name(&shdr, secstrs);

		/* kernel/module: route symbols to a per-section map/dso */
		if (self->kernel != DSO_TYPE_USER || kmodule) {
			char dso_name[PATH_MAX];

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    self->short_name_len)) == 0)

			if (strcmp(section_name, ".text") == 0) {

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", self->short_name, section_name);

			curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
				curr_dso->kernel = self->kernel;
				curr_map = map__new2(start, curr_dso,
				if (curr_map == NULL) {
					dso__delete(curr_dso);
				curr_map->map_ip = identity__map_ip;
				curr_map->unmap_ip = identity__map_ip;
				curr_dso->origin = self->origin;
				map_groups__insert(kmap->kmaps, curr_map);
				dsos__add(&self->node, curr_dso);
				dso__set_loaded(curr_dso, map->type);
				curr_dso = curr_map->dso;

		if (curr_dso->adjust_symbols) {
			pr_debug4("%s: adjusting symbol: st_value: %#Lx "
				  "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			/* convert virtual address to file offset */
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		/*
		 * We need to figure out if the object was created from C++ sources
		 * DWARF DW_compile_unit has this, but we don't always have access
		 */
		demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
		if (demangled != NULL)
			elf_name = demangled;

		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);

		if (filter && filter(curr_map, f))

		symbols__insert(&curr_dso->symbols[curr_map->type], f);

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	symbols__fixup_end(&self->symbols[map->type]);
	/*
	 * We need to fixup this here too because we create new
	 * maps here, for things like vsyscall sections.
	 */
	__map_groups__fixup_end(kmap->kmaps, map->type);
/* Compare the dso's stored build-id against a raw one. */
static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
	return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
/*
 * Read build-ids for every dso on the list (optionally only dsos with
 * hits); returns true if at least one dso has a build-id afterwards.
 */
bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
	bool have_build_id = false;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit)
		/* already known: nothing to read */
		if (pos->has_build_id) {
			have_build_id = true;
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id = true;
			pos->has_build_id = true;

	return have_build_id;
/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)
/*
 * Extract the GNU build-id note from an already-opened ELF image into bf.
 * Returns BUILD_ID_SIZE on success (err is set to that before return).
 */
static int elf_read_build_id(Elf *elf, void *bf, size_t size)
	if (size < BUILD_ID_SIZE)

	if (ek != ELF_K_ELF)

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".note.gnu.build-id", NULL);
	/* fall back to another note section (name elided in this excerpt) */
	sec = elf_section_by_name(elf, &ehdr, &shdr,

	data = elf_getdata(sec, NULL);

	/* walk the chain of notes looking for NT_GNU_BUILD_ID / "GNU" */
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		int namesz = NOTE_ALIGN(nhdr->n_namesz),
		    descsz = NOTE_ALIGN(nhdr->n_descsz);

		ptr += sizeof(*nhdr);

		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				memcpy(bf, ptr, BUILD_ID_SIZE);
				err = BUILD_ID_SIZE;
/* Open an ELF file by path and read its build-id into bf. */
int filename__read_build_id(const char *filename, void *bf, size_t size)
	if (size < BUILD_ID_SIZE)

	fd = open(filename, O_RDONLY);

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);

	err = elf_read_build_id(elf, bf, size);
/*
 * Read a build-id from a raw sysfs note file (e.g.
 * /sys/module/<m>/notes/.note.gnu.build-id) using plain read()s, since
 * sysfs notes have no ELF container around them.
 */
int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
	if (size < BUILD_ID_SIZE)

	fd = open(filename, O_RDONLY);

	if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))

	namesz = NOTE_ALIGN(nhdr.n_namesz);
	descsz = NOTE_ALIGN(nhdr.n_descsz);
	if (nhdr.n_type == NT_GNU_BUILD_ID &&
	    nhdr.n_namesz == sizeof("GNU")) {
		if (read(fd, bf, namesz) != namesz)
		if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
			if (read(fd, build_id,
				 BUILD_ID_SIZE) == BUILD_ID_SIZE) {
		/* not the GNU note: skip its descriptor */
		} else if (read(fd, bf, descsz) != descsz)
		/* not a build-id note: skip name + descriptor */
		int n = namesz + descsz;
		if (read(fd, bf, n) != n)
/* One-letter code for where this dso's symtab came from (for display). */
char dso__symtab_origin(const struct dso *self)
	static const char origin[] = {
		[DSO__ORIG_KERNEL] =         'k',
		[DSO__ORIG_JAVA_JIT] =       'j',
		[DSO__ORIG_BUILD_ID_CACHE] = 'B',
		[DSO__ORIG_FEDORA] =         'f',
		[DSO__ORIG_UBUNTU] =         'u',
		[DSO__ORIG_BUILDID] =        'b',
		[DSO__ORIG_DSO] =            'd',
		[DSO__ORIG_KMODULE] =        'K',
		[DSO__ORIG_GUEST_KERNEL] =   'g',
		[DSO__ORIG_GUEST_KMODULE] =  'G',

	if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND)
	return origin[self->origin];
/*
 * Top-level symbol loader for a dso: dispatches kernel/guest-kernel dsos
 * to their dedicated loaders, recognizes /tmp/perf- JIT maps, and
 * otherwise walks the DSO__ORIG_* candidate list of debug images
 * (build-id cache, distro debuginfo paths, the dso itself, ...) in two
 * passes — first requiring a full .symtab, then accepting .dynsym too.
 */
int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
	int size = PATH_MAX;
	struct machine *machine;
	const char *root_dir;

	dso__set_loaded(self, map->type);

	if (self->kernel == DSO_TYPE_KERNEL)
		return dso__load_kernel_sym(self, map, filter);
	else if (self->kernel == DSO_TYPE_GUEST_KERNEL)
		return dso__load_guest_kernel_sym(self, map, filter);

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;

	name = malloc(size);

	self->adjust_symbols = 0;

	/* perf JIT map files are named /tmp/perf-<pid>.map */
	if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
		ret = dso__load_perf_map(self, map, filter);
		self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
					 DSO__ORIG_NOT_FOUND;

	/* Iterate over candidate debug images.
	 * On the first pass, only load images if they have a full symtab.
	 * Failing that, do a second pass where we accept .dynsym also
	 */
	for (self->origin = DSO__ORIG_BUILD_ID_CACHE, want_symtab = 1;
	     self->origin != DSO__ORIG_NOT_FOUND;
		switch (self->origin) {
		case DSO__ORIG_BUILD_ID_CACHE:
			if (dso__build_id_filename(self, name, size) == NULL)
		case DSO__ORIG_FEDORA:
			snprintf(name, size, "/usr/lib/debug%s.debug",
		case DSO__ORIG_UBUNTU:
			snprintf(name, size, "/usr/lib/debug%s",
		case DSO__ORIG_BUILDID: {
			char build_id_hex[BUILD_ID_SIZE * 2 + 1];

			if (!self->has_build_id)

			build_id__sprintf(self->build_id,
					  sizeof(self->build_id),
			snprintf(name, size,
				 "/usr/lib/debug/.build-id/%.2s/%s.debug",
				 build_id_hex, build_id_hex + 2);
			snprintf(name, size, "%s", self->long_name);
		case DSO__ORIG_GUEST_KMODULE:
			if (map->groups && map->groups->machine)
				root_dir = map->groups->machine->root_dir;
			snprintf(name, size, "%s%s", root_dir, self->long_name);

			/*
			 * If we wanted a full symtab but no image had one,
			 * relax our requirements and repeat the search.
			 */
			self->origin = DSO__ORIG_BUILD_ID_CACHE;

		/* Name is now the name of the next image to try */
		fd = open(name, O_RDONLY);

		ret = dso__load_sym(self, map, name, fd, filter, 0,

		/*
		 * Some people seem to have debuginfo files _WITHOUT_ debug
		 */
		int nr_plt = dso__synthesize_plt_symbols(self, map, filter);

	if (ret < 0 && strstr(self->name, " (deleted)") != NULL)
/* Linear search of one map type's tree for a map whose dso short name
 * matches. */
struct map *map_groups__find_by_name(struct map_groups *self,
				     enum map_type type, const char *name)
	for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);

		if (map->dso && strcmp(map->dso->short_name, name) == 0)
/*
 * Read a kernel module's build-id from sysfs
 * (<root>/sys/module/<module>/notes/.note.gnu.build-id).
 */
static int dso__kernel_module_get_build_id(struct dso *self,
					   const char *root_dir)
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = self->short_name + 1;

	/* %.*s with strlen(name)-1 drops the trailing ']' */
	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, self->build_id,
				 sizeof(self->build_id)) == 0)
		self->has_build_id = true;
/*
 * Recursively walk a /lib/modules/.../kernel directory, matching each
 * *.ko file to its "[module]" map (dashes normalized to underscores) and
 * recording the file path as the module dso's long name.
 */
static int map_groups__set_modules_path_dir(struct map_groups *self,
					    const char *dir_name)
	struct dirent *dent;
	DIR *dir = opendir(dir_name);

	pr_debug("%s: cannot open %s dir\n", __func__, dir_name);

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];

		/*sshfs might return bad dent->d_type, so we have to stat*/
		sprintf(path, "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))

			snprintf(path, sizeof(path), "%s/%s",
				 dir_name, dent->d_name);
			ret = map_groups__set_modules_path_dir(self, path);
			/* regular file: only *.ko module objects matter */
			char *dot = strrchr(dent->d_name, '.'),

			if (dot == NULL || strcmp(dot, ".ko"))
			/* build the bracketed map name: "[modname]" */
			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			/* module file names use '-', kallsyms uses '_' */
			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name);

			snprintf(path, sizeof(path), "%s/%s",
				 dir_name, dent->d_name);

			long_name = strdup(path);
			if (long_name == NULL) {
			dso__set_long_name(map->dso, long_name);
			map->dso->lname_alloc = 1;
			dso__kernel_module_get_build_id(map->dso, "");
/*
 * Read <root>/proc/version and return a strdup'd kernel release string:
 * the word following "Linux version " (caller frees).
 */
static char *get_kernel_version(const char *root_dir)
	char version[PATH_MAX];
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");

	tmp = fgets(version, sizeof(version), file);

	name = strstr(version, prefix);

	name += strlen(prefix);
	/* terminate at the first space after the release string */
	tmp = strchr(name, ' ');

	return strdup(name);
/* Resolve <root>/lib/modules/<version>/kernel and scan it for .ko paths. */
static int machine__set_modules_path(struct machine *self)
	char modules_path[PATH_MAX];

	version = get_kernel_version(self->root_dir);

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
		 self->root_dir, version);

	return map_groups__set_modules_path_dir(&self->kmaps, modules_path);
/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 * Kernel maps get a struct kmap allocated right after the struct map.
 */
static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
	struct map *self = calloc(1, (sizeof(*self) +
				      (dso->kernel ? sizeof(struct kmap) : 0)));
	/*
	 * ->end will be filled after we load all the symbols
	 */
	map__init(self, type, start, 0, 0, dso);
/*
 * Create (or reuse) the dso for a kernel module, wrap it in a new
 * MAP__FUNCTION map starting at its load address, and insert it into the
 * machine's kernel maps.
 */
struct map *machine__new_module(struct machine *self, u64 start,
				const char *filename)
	struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename);

	map = map__new2(start, dso, MAP__FUNCTION);

	if (machine__is_host(self))
		dso->origin = DSO__ORIG_KMODULE;
	dso->origin = DSO__ORIG_GUEST_KMODULE;
	map_groups__insert(&self->kmaps, map);
/*
 * Parse <root>/proc/modules (or the guest default module list) and create
 * a "[modname]" map per module at its reported load address, then fill
 * in the module file paths from /lib/modules.
 */
static int machine__create_modules(struct machine *self)
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(self))
		modules = symbol_conf.default_guest_modules;
	sprintf(path, "%s/proc/modules", self->root_dir);

	file = fopen(modules, "r");

	while (!feof(file)) {
		char name[PATH_MAX];

		line_len = getline(&line, &n, file);

		line[--line_len] = '\0'; /* \n */

		/* the load address follows the "0x" in the line */
		sep = strrchr(line, 'x');

		hex2u64(sep + 1, &start);

		/* the module name is everything before the first space */
		sep = strchr(line, ' ');

		snprintf(name, sizeof(name), "[%s]", line);
		map = machine__new_module(self, start, name);
		goto out_delete_line;
		dso__kernel_module_get_build_id(map->dso, self->root_dir);

	return machine__set_modules_path(self);
/*
 * Load symbols for @self from an on-disk vmlinux image via
 * dso__load_sym(), marking the dso loaded for @map->type.
 */
1775 static int dso__load_vmlinux(struct dso *self, struct map *map,
1776 const char *vmlinux, symbol_filter_t filter)
1780 fd = open(vmlinux, O_RDONLY);
1784 dso__set_loaded(self, map->type);
1785 err = dso__load_sym(self, map, vmlinux, fd, filter, 0, 0);
1789 pr_debug("Using %s for symbols\n", vmlinux);
/*
 * Try a build-id cached vmlinux first, then every entry of the global
 * vmlinux_path[] table, stopping at the first image that loads.
 */
1794 int dso__load_vmlinux_path(struct dso *self, struct map *map,
1795 symbol_filter_t filter)
1800 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1801 vmlinux_path__nr_entries + 1);
/* A build-id match in the ~/.debug cache beats any path guessing. */
1803 filename = dso__build_id_filename(self, NULL, 0);
1804 if (filename != NULL) {
1805 err = dso__load_vmlinux(self, map, filename, filter);
/* filename was heap-allocated by dso__build_id_filename; ownership
 * moves to the dso here. */
1807 dso__set_long_name(self, filename);
1813 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1814 err = dso__load_vmlinux(self, map, vmlinux_path[i], filter);
1816 dso__set_long_name(self, strdup(vmlinux_path[i]));
/*
 * Load kernel symbols for @self, trying in order: the user-specified
 * vmlinux (mandatory if given), the vmlinux_path[] candidates, a
 * build-id matched kallsyms (running kernel or the ~/.debug cache),
 * and finally the running kernel's /proc/kallsyms as a last resort.
 */
1824 static int dso__load_kernel_sym(struct dso *self, struct map *map,
1825 symbol_filter_t filter)
1828 const char *kallsyms_filename = NULL;
1829 char *kallsyms_allocated_filename = NULL;
1831 * Step 1: if the user specified a vmlinux filename, use it and only
1832 * it, reporting errors to the user if it cannot be used.
1834 * For instance, try to analyse an ARM perf.data file _without_ a
1835 * build-id, or if the user specifies the wrong path to the right
1836 * vmlinux file, obviously we can't fallback to another vmlinux (a
1837 * x86_86 one, on the machine where analysis is being performed, say),
1838 * or worse, /proc/kallsyms.
1840 * If the specified file _has_ a build-id and there is a build-id
1841 * section in the perf.data file, we will still do the expected
1842 * validation in dso__load_vmlinux and will bail out if they don't
1845 if (symbol_conf.vmlinux_name != NULL) {
1846 err = dso__load_vmlinux(self, map,
1847 symbol_conf.vmlinux_name, filter);
1849 dso__set_long_name(self,
1850 strdup(symbol_conf.vmlinux_name));
1856 if (vmlinux_path != NULL) {
1857 err = dso__load_vmlinux_path(self, map, filter);
1863 * Say the kernel DSO was created when processing the build-id header table,
1864 * we have a build-id, so check if it is the same as the running kernel,
1865 * using it if it is.
1867 if (self->has_build_id) {
1868 u8 kallsyms_build_id[BUILD_ID_SIZE];
1869 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
/* /sys/kernel/notes exposes the running kernel's build-id note. */
1871 if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
1872 sizeof(kallsyms_build_id)) == 0) {
1873 if (dso__build_id_equal(self, kallsyms_build_id)) {
1874 kallsyms_filename = "/proc/kallsyms";
1879 * Now look if we have it on the build-id cache in
1880 * $HOME/.debug/[kernel.kallsyms].
1882 build_id__sprintf(self->build_id, sizeof(self->build_id),
1885 if (asprintf(&kallsyms_allocated_filename,
1886 "%s/.debug/[kernel.kallsyms]/%s",
1887 getenv("HOME"), sbuild_id) == -1) {
1888 pr_err("Not enough memory for kallsyms file lookup\n");
1892 kallsyms_filename = kallsyms_allocated_filename;
/* Bail out when neither the cache nor a matching kernel exists. */
1894 if (access(kallsyms_filename, F_OK)) {
1895 pr_err("No kallsyms or vmlinux with build-id %s "
1896 "was found\n", sbuild_id);
1897 free(kallsyms_allocated_filename);
1902 * Last resort, if we don't have a build-id and couldn't find
1903 * any vmlinux file, try the running kernel kallsyms table.
1905 kallsyms_filename = "/proc/kallsyms";
1909 err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
1911 pr_debug("Using %s for symbols\n", kallsyms_filename);
1912 free(kallsyms_allocated_filename);
/* Symbols came from kallsyms: rename the dso and fix the map extents. */
1916 if (kallsyms_filename != NULL)
1917 dso__set_long_name(self, strdup("[kernel.kallsyms]"));
1918 map__fixup_start(map);
1919 map__fixup_end(map);
/*
 * Guest-kernel counterpart of dso__load_kernel_sym(): use the
 * user-supplied guest vmlinux/kallsyms for the default guest, or the
 * guest-rooted /proc/kallsyms for a mounted guest filesystem.
 */
1925 static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
1926 symbol_filter_t filter)
1929 const char *kallsyms_filename = NULL;
1930 struct machine *machine;
1931 char path[PATH_MAX];
/* A guest map must be attached to a map_groups to find its machine. */
1934 pr_debug("Guest kernel map hasn't the point to groups\n");
1937 machine = map->groups->machine;
1939 if (machine__is_default_guest(machine)) {
1941 * if the user specified a vmlinux filename, use it and only
1942 * it, reporting errors to the user if it cannot be used.
1943 * Or use file guest_kallsyms inputted by user on commandline
1945 if (symbol_conf.default_guest_vmlinux_name != NULL) {
1946 err = dso__load_vmlinux(self, map,
1947 symbol_conf.default_guest_vmlinux_name, filter);
1951 kallsyms_filename = symbol_conf.default_guest_kallsyms;
1952 if (!kallsyms_filename)
/* Non-default guest: kallsyms lives under the guest mount root. */
1955 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1956 kallsyms_filename = path;
1959 err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
1961 pr_debug("Using %s for symbols\n", kallsyms_filename);
/* Rename the dso to the guest mmap name and fix the map extents. */
1965 if (kallsyms_filename != NULL) {
1966 machine__mmap_name(machine, path, sizeof(path));
1967 dso__set_long_name(self, strdup(path));
1969 map__fixup_start(map);
1970 map__fixup_end(map);
/* Append @dso to the dso list @head (tail keeps insertion order). */
1976 static void dsos__add(struct list_head *head, struct dso *dso)
1978 list_add_tail(&dso->node, head);
/* Linear search of @head for a dso whose ->long_name equals @name. */
1981 static struct dso *dsos__find(struct list_head *head, const char *name)
1985 list_for_each_entry(pos, head, node)
1986 if (strcmp(pos->long_name, name) == 0)
/*
 * Find a dso named @name on @head, creating and registering a new one
 * (with its basename as short name) if it is not there yet.
 */
1991 struct dso *__dsos__findnew(struct list_head *head, const char *name)
1993 struct dso *dso = dsos__find(head, name);
1996 dso = dso__new(name);
1998 dsos__add(head, dso);
1999 dso__set_basename(dso);
/* Print every dso on @head for all map types; returns bytes written. */
2006 size_t __dsos__fprintf(struct list_head *head, FILE *fp)
2011 list_for_each_entry(pos, head, node) {
2013 for (i = 0; i < MAP__NR_TYPES; ++i)
2014 ret += dso__fprintf(pos, i, fp);
/*
 * Print the kernel and user dsos of every machine in the rb-tree @self;
 * returns total bytes written.
 */
2020 size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp)
2025 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
2026 struct machine *pos = rb_entry(nd, struct machine, rb_node);
2027 ret += __dsos__fprintf(&pos->kernel_dsos, fp);
2028 ret += __dsos__fprintf(&pos->user_dsos, fp);
/*
 * Print "<build-id> <long_name>" for each dso on @head; when @with_hits
 * is set, dsos with no recorded hits are skipped.
 */
2034 static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
2040 list_for_each_entry(pos, head, node) {
2041 if (with_hits && !pos->hit)
2043 ret += dso__fprintf_buildid(pos, fp);
2044 ret += fprintf(fp, " %s\n", pos->long_name);
/* Build-id listing for one machine: kernel dsos then user dsos. */
2049 size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits)
2051 return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) +
2052 __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits);
/* Build-id listing for every machine in the rb-tree @self. */
2055 size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits)
2060 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
2061 struct machine *pos = rb_entry(nd, struct machine, rb_node);
2062 ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
/*
 * Allocate a host-kernel dso; @name defaults to "[kernel.kallsyms]"
 * and the short name shown to users is "[kernel]".
 */
2067 struct dso *dso__new_kernel(const char *name)
2069 struct dso *self = dso__new(name ?: "[kernel.kallsyms]");
2072 dso__set_short_name(self, "[kernel]");
2073 self->kernel = DSO_TYPE_KERNEL;
/*
 * Guest counterpart of dso__new_kernel(): default name comes from
 * machine__mmap_name(), short name is "[guest.kernel]".
 */
2079 static struct dso *dso__new_guest_kernel(struct machine *machine,
2083 struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf)));
2086 dso__set_short_name(self, "[guest.kernel]");
2087 self->kernel = DSO_TYPE_GUEST_KERNEL;
/*
 * Fill @self->build_id from <root_dir>/sys/kernel/notes.  Default
 * guests have no accessible sysfs, so they are skipped.
 */
2093 void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine)
2095 char path[PATH_MAX];
2097 if (machine__is_default_guest(machine))
2099 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
2100 if (sysfs__read_build_id(path, self->build_id,
2101 sizeof(self->build_id)) == 0)
2102 self->has_build_id = true;
/*
 * Create the kernel dso for @self (host or guest flavour), read its
 * running build-id and register it on the machine's kernel dso list.
 */
2105 static struct dso *machine__create_kernel(struct machine *self)
2107 const char *vmlinux_name = NULL;
2110 if (machine__is_host(self)) {
2111 vmlinux_name = symbol_conf.vmlinux_name;
2112 kernel = dso__new_kernel(vmlinux_name);
/* Guest machines: only the default guest takes a configured name. */
2114 if (machine__is_default_guest(self))
2115 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
2116 kernel = dso__new_guest_kernel(self, vmlinux_name);
2119 if (kernel != NULL) {
2120 dso__read_running_kernel_build_id(kernel, self);
2121 dsos__add(&self->kernel_dsos, kernel);
/*
 * Create one vmlinux map per map type for @kernel, wire identity
 * address translation and the back-pointer to the machine's kmaps,
 * then insert each map into the kernel map groups.
 */
2126 int __machine__create_kernel_maps(struct machine *self, struct dso *kernel)
2130 for (type = 0; type < MAP__NR_TYPES; ++type) {
2133 self->vmlinux_maps[type] = map__new2(0, kernel, type);
2134 if (self->vmlinux_maps[type] == NULL)
/* Kernel addresses need no relocation: map/unmap are identity. */
2137 self->vmlinux_maps[type]->map_ip =
2138 self->vmlinux_maps[type]->unmap_ip = identity__map_ip;
2140 kmap = map__kmap(self->vmlinux_maps[type]);
2141 kmap->kmaps = &self->kmaps;
2142 map_groups__insert(&self->kmaps, self->vmlinux_maps[type]);
/*
 * Tear down the per-type vmlinux maps created by
 * __machine__create_kernel_maps(), releasing the shared
 * ref_reloc_sym exactly once (on the MAP__FUNCTION pass).
 */
2148 void machine__destroy_kernel_maps(struct machine *self)
2152 for (type = 0; type < MAP__NR_TYPES; ++type) {
2155 if (self->vmlinux_maps[type] == NULL)
2158 kmap = map__kmap(self->vmlinux_maps[type]);
2159 map_groups__remove(&self->kmaps, self->vmlinux_maps[type]);
2160 if (kmap->ref_reloc_sym) {
2162 * ref_reloc_sym is shared among all maps, so free just
2165 if (type == MAP__FUNCTION) {
2166 free((char *)kmap->ref_reloc_sym->name);
2167 kmap->ref_reloc_sym->name = NULL;
2168 free(kmap->ref_reloc_sym);
/* Other map types only clear their alias to the shared sym. */
2170 kmap->ref_reloc_sym = NULL;
2173 map__delete(self->vmlinux_maps[type]);
2174 self->vmlinux_maps[type] = NULL;
/*
 * Full kernel-map setup for a machine: create the kernel dso and its
 * maps, optionally add module maps, then fix up all map ->end values.
 * Module-map failures are non-fatal (debug message only).
 */
2178 int machine__create_kernel_maps(struct machine *self)
2180 struct dso *kernel = machine__create_kernel(self);
2182 if (kernel == NULL ||
2183 __machine__create_kernel_maps(self, kernel) < 0)
2186 if (symbol_conf.use_modules && machine__create_modules(self) < 0)
2187 pr_debug("Problems creating module maps, continuing anyway...\n");
2189 * Now that we have all the maps created, just set the ->end of them:
2191 map_groups__fixup_end(&self->kmaps);
/*
 * Free every vmlinux_path[] entry and the table itself, resetting the
 * globals so a later vmlinux_path__init() starts clean.
 */
2195 static void vmlinux_path__exit(void)
2197 while (--vmlinux_path__nr_entries >= 0) {
2198 free(vmlinux_path[vmlinux_path__nr_entries]);
2199 vmlinux_path[vmlinux_path__nr_entries] = NULL;
2203 vmlinux_path = NULL;
/*
 * Populate the global vmlinux_path[] table with the five standard
 * candidate locations for the running kernel's vmlinux image:
 * ./vmlinux, /boot/vmlinux, /boot/vmlinux-<release>,
 * /lib/modules/<release>/build/vmlinux and
 * /usr/lib/debug/lib/modules/<release>/vmlinux.
 * On any allocation failure the partially-built table is torn down
 * via vmlinux_path__exit().
 */
2206 static int vmlinux_path__init(void)
/* uname() supplies the release string for versioned candidates. */
2211 if (uname(&uts) < 0)
2214 vmlinux_path = malloc(sizeof(char *) * 5);
2215 if (vmlinux_path == NULL)
2218 vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
2219 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2221 ++vmlinux_path__nr_entries;
2222 vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
2223 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2225 ++vmlinux_path__nr_entries;
2226 snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
2227 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
2228 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2230 ++vmlinux_path__nr_entries;
2231 snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
2232 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
2233 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2235 ++vmlinux_path__nr_entries;
2236 snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
2238 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
2239 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2241 ++vmlinux_path__nr_entries;
/* Error path: undo whatever was allocated so far. */
2246 vmlinux_path__exit();
/*
 * Print the vmlinux candidate list for @self: the build-id cache entry
 * (as index [0]) when the kernel dso has a build-id, then every
 * vmlinux_path[] entry, numbered accordingly.
 */
2250 size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp)
2254 struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso;
2256 if (kdso->has_build_id) {
2257 char filename[PATH_MAX];
2258 if (dso__build_id_filename(kdso, filename, sizeof(filename)))
2259 printed += fprintf(fp, "[0] %s\n", filename);
/* Shift the printed indices by one when [0] was the cache entry. */
2262 for (i = 0; i < vmlinux_path__nr_entries; ++i)
2263 printed += fprintf(fp, "[%d] %s\n",
2264 i + kdso->has_build_id, vmlinux_path[i]);
/*
 * Parse the comma-separated @list_str into a strlist stored at *list.
 * A NULL @list_str is not an error (the list stays unset); a parse
 * failure is reported using @list_name for the error message.
 */
2269 static int setup_list(struct strlist **list, const char *list_str,
2270 const char *list_name)
2272 if (list_str == NULL)
2275 *list = strlist__new(true, list_str);
2277 pr_err("problems parsing %s list\n", list_name);
/*
 * One-time initialization of the symbol subsystem: libelf version,
 * per-symbol private size adjustment for name-sorting, vmlinux path
 * table, and the dso/comm/symbol filter lists from symbol_conf.
 * Idempotent: returns early if already initialized.  On failure the
 * lists created so far are torn down (goto-cleanup, elided labels).
 */
2283 int symbol__init(void)
2285 if (symbol_conf.initialized)
2288 elf_version(EV_CURRENT);
/* Name-sorted rb-nodes embed extra data before each symbol. */
2289 if (symbol_conf.sort_by_name)
2290 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
2291 sizeof(struct symbol));
2293 if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
2296 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2297 pr_err("'.' is the only non valid --field-separator argument\n");
2301 if (setup_list(&symbol_conf.dso_list,
2302 symbol_conf.dso_list_str, "dso") < 0)
2305 if (setup_list(&symbol_conf.comm_list,
2306 symbol_conf.comm_list_str, "comm") < 0)
2307 goto out_free_dso_list;
2309 if (setup_list(&symbol_conf.sym_list,
2310 symbol_conf.sym_list_str, "symbol") < 0)
2311 goto out_free_comm_list;
2313 symbol_conf.initialized = true;
/* Error unwind: free lists in reverse order of creation. */
2317 strlist__delete(symbol_conf.dso_list);
2319 strlist__delete(symbol_conf.comm_list);
/*
 * Mirror of symbol__init(): free the filter lists and vmlinux path
 * table and clear the initialized flag.  Safe to call when never
 * initialized.
 */
2323 void symbol__exit(void)
2325 if (!symbol_conf.initialized)
2327 strlist__delete(symbol_conf.sym_list);
2328 strlist__delete(symbol_conf.dso_list);
2329 strlist__delete(symbol_conf.comm_list);
2330 vmlinux_path__exit();
2331 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2332 symbol_conf.initialized = false;
/*
 * Find (or create) the machine for @pid in the rb-tree @self and build
 * its kernel maps.
 */
2335 int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
2337 struct machine *machine = machines__findnew(self, pid);
2339 if (machine == NULL)
2342 return machine__create_kernel_maps(machine);
/*
 * Convert one hex digit to its value 0-15; non-hex characters fall
 * through to the (elided) failure return.
 */
2345 static int hex(char ch)
2347 if ((ch >= '0') && (ch <= '9'))
2349 if ((ch >= 'a') && (ch <= 'f'))
2350 return ch - 'a' + 10;
2351 if ((ch >= 'A') && (ch <= 'F'))
2352 return ch - 'A' + 10;
2357 * While we find nice hex chars, build a long_val.
2358 * Return number of chars processed.
2360 int hex2u64(const char *ptr, u64 *long_val)
2362 const char *p = ptr;
/* Accumulate big-endian: each digit shifts the value left a nibble. */
2366 const int hex_val = hex(*p);
2371 *long_val = (*long_val << 4) | hex_val;
/* In-place replace every occurrence of @from with @to in @s. */
2378 char *strxfrchar(char *s, char from, char to)
2382 while ((p = strchr(p, from)) != NULL)
/*
 * Build kernel maps for guests: the "default guest" when any of the
 * default-guest symbol_conf paths is set, plus one machine per numeric
 * directory under the guestmount root that exposes a readable
 * proc/kallsyms (the directory name is the guest pid).
 */
2388 int machines__create_guest_kernel_maps(struct rb_root *self)
2391 struct dirent **namelist = NULL;
2393 char path[PATH_MAX];
2396 if (symbol_conf.default_guest_vmlinux_name ||
2397 symbol_conf.default_guest_modules ||
2398 symbol_conf.default_guest_kallsyms) {
2399 machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID);
2402 if (symbol_conf.guestmount) {
2403 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
2406 for (i = 0; i < items; i++) {
2407 if (!isdigit(namelist[i]->d_name[0])) {
2408 /* Filter out . and .. */
/* Guest directory names are pids under the guestmount root. */
2411 pid = atoi(namelist[i]->d_name);
2412 sprintf(path, "%s/%s/proc/kallsyms",
2413 symbol_conf.guestmount,
2414 namelist[i]->d_name);
2415 ret = access(path, R_OK);
2417 pr_debug("Can't access file %s\n", path);
2420 machines__create_kernel_maps(self, pid);
/*
 * Delete every machine in the rb-tree @self, advancing the iterator
 * before erasing so traversal stays valid during deletion.
 */
2429 void machines__destroy_guest_kernel_maps(struct rb_root *self)
2431 struct rb_node *next = rb_first(self);
2434 struct machine *pos = rb_entry(next, struct machine, rb_node);
2436 next = rb_next(&pos->rb_node);
2437 rb_erase(&pos->rb_node, self);
2438 machine__delete(pos);
/*
 * Load kallsyms from @filename into the machine's vmlinux map of the
 * given @type, then fix up all map ends for that type (kallsyms mixes
 * kernel and module sections, so ends need recomputing).
 */
2442 int machine__load_kallsyms(struct machine *self, const char *filename,
2443 enum map_type type, symbol_filter_t filter)
2445 struct map *map = self->vmlinux_maps[type];
2446 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
2449 dso__set_loaded(map->dso, type);
2451 * Since /proc/kallsyms will have multiple sessions for the
2452 * kernel, with modules between them, fixup the end of all
2455 __map_groups__fixup_end(&self->kmaps, type);
2461 int machine__load_vmlinux_path(struct machine *self, enum map_type type,
2462 symbol_filter_t filter)
2464 struct map *map = self->vmlinux_maps[type];
2465 int ret = dso__load_vmlinux_path(map->dso, map, filter);
2468 dso__set_loaded(map->dso, type);
2469 map__reloc_vmlinux(map);