3 #include <sys/resource.h>
12 char dso__symtab_origin(const struct dso *dso)
14 static const char origin[] = {
15 [DSO_BINARY_TYPE__KALLSYMS] = 'k',
16 [DSO_BINARY_TYPE__VMLINUX] = 'v',
17 [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
18 [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
19 [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
20 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
21 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
22 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
23 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
24 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
25 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
26 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
27 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
28 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
29 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
30 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
33 if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
35 return origin[dso->symtab_type];
38 int dso__read_binary_type_filename(const struct dso *dso,
39 enum dso_binary_type type,
40 char *root_dir, char *filename, size_t size)
42 char build_id_hex[SBUILD_ID_SIZE];
47 case DSO_BINARY_TYPE__DEBUGLINK: {
50 len = __symbol__join_symfs(filename, size, dso->long_name);
51 debuglink = filename + len;
52 while (debuglink != filename && *debuglink != '/')
54 if (*debuglink == '/')
58 if (!is_regular_file(filename))
61 ret = filename__read_debuglink(filename, debuglink,
62 size - (debuglink - filename));
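/*
 * Illustration (hypothetical binary): for dso->long_name "/usr/bin/ls"
 * whose .gnu_debuglink section names "ls.debug", @debuglink points just
 * past the last '/', so filename__read_debuglink() leaves filename as
 * "/usr/bin/ls.debug" -- the debug name replaces the basename while the
 * directory part is kept.
 */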
65 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
66 if (dso__build_id_filename(dso, filename, size) == NULL)
70 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
71 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
72 snprintf(filename + len, size - len, "%s.debug", dso->long_name);
75 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
76 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
77 snprintf(filename + len, size - len, "%s", dso->long_name);
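/*
 * Illustration (hypothetical dso->long_name "/usr/bin/ls", empty symfs):
 * the Fedora case above yields "/usr/lib/debug/usr/bin/ls.debug" while
 * the Ubuntu case yields "/usr/lib/debug/usr/bin/ls".
 */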
80 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
82 const char *last_slash;
85 last_slash = dso->long_name + dso->long_name_len;
86 while (last_slash != dso->long_name && *last_slash != '/')
89 len = __symbol__join_symfs(filename, size, "");
90 dir_size = last_slash - dso->long_name + 2;
91 if (dir_size > (size - len)) {
95 len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
96 len += scnprintf(filename + len, size - len, ".debug%s",
101 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
102 if (!dso->has_build_id) {
107 build_id__sprintf(dso->build_id,
108 sizeof(dso->build_id),
110 len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
111 snprintf(filename + len, size - len, "%.2s/%s.debug",
112 build_id_hex, build_id_hex + 2);
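/*
 * Illustration: a (hypothetical) build id printed as "abcdef..." is split
 * after its first two hex characters, giving a path of the form
 * "/usr/lib/debug/.build-id/ab/cdef....debug".
 */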
115 case DSO_BINARY_TYPE__VMLINUX:
116 case DSO_BINARY_TYPE__GUEST_VMLINUX:
117 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
118 __symbol__join_symfs(filename, size, dso->long_name);
121 case DSO_BINARY_TYPE__GUEST_KMODULE:
122 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
123 path__join3(filename, size, symbol_conf.symfs,
124 root_dir, dso->long_name);
127 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
128 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
129 __symbol__join_symfs(filename, size, dso->long_name);
132 case DSO_BINARY_TYPE__KCORE:
133 case DSO_BINARY_TYPE__GUEST_KCORE:
134 snprintf(filename, size, "%s", dso->long_name);
138 case DSO_BINARY_TYPE__KALLSYMS:
139 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
140 case DSO_BINARY_TYPE__JAVA_JIT:
141 case DSO_BINARY_TYPE__NOT_FOUND:
149 static const struct {
151 int (*decompress)(const char *input, int output);
153 #ifdef HAVE_ZLIB_SUPPORT
154 { "gz", gzip_decompress_to_file },
156 #ifdef HAVE_LZMA_SUPPORT
157 { "xz", lzma_decompress_to_file },
162 bool is_supported_compression(const char *ext)
166 for (i = 0; compressions[i].fmt; i++) {
167 if (!strcmp(ext, compressions[i].fmt))
173 bool is_kernel_module(const char *pathname, int cpumode)
176 int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
178 WARN_ONCE(mode != cpumode,
179 "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
183 case PERF_RECORD_MISC_USER:
184 case PERF_RECORD_MISC_HYPERVISOR:
185 case PERF_RECORD_MISC_GUEST_USER:
187 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
189 if (kmod_path__parse(&m, pathname)) {
190 pr_err("Failed to check whether %s is a kernel module or not. Assuming it is.\n",
199 bool decompress_to_file(const char *ext, const char *filename, int output_fd)
203 for (i = 0; compressions[i].fmt; i++) {
204 if (!strcmp(ext, compressions[i].fmt))
205 return !compressions[i].decompress(filename,
211 bool dso__needs_decompress(struct dso *dso)
213 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
214 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
218 * Parses the kernel module specified in @path and updates @m as follows:
221 * @comp - true if @path contains supported compression suffix,
223 * @kmod - true if @path contains '.ko' suffix in right position,
225 * @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
226 * of the kernel module without suffixes, otherwise a strdup-ed copy of the original @path
228 * @ext - if (@alloc_ext && @comp) is true, it contains a strdup-ed copy of
229 * the compression suffix
231 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
233 int __kmod_path__parse(struct kmod_path *m, const char *path,
234 bool alloc_name, bool alloc_ext)
236 const char *name = strrchr(path, '/');
237 const char *ext = strrchr(path, '.');
238 bool is_simple_name = false;
240 memset(m, 0x0, sizeof(*m));
241 name = name ? name + 1 : path;
244 * '.' is also a valid character for a module name. For example,
245 * [aaa.bbb] is a valid module name, so '[' must take priority
246 * over the '.ko' suffix check.
248 * Kernel names like these come from machine__mmap_name; such a
249 * name belongs to the kernel itself, not to a kernel module.
251 if (name[0] == '[') {
252 is_simple_name = true;
253 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
254 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
255 (strncmp(name, "[vdso]", 6) == 0) ||
256 (strncmp(name, "[vsyscall]", 10) == 0)) {
263 /* No extension, just return name. */
264 if ((ext == NULL) || is_simple_name) {
266 m->name = strdup(name);
267 return m->name ? 0 : -ENOMEM;
272 if (is_supported_compression(ext + 1)) {
277 /* Check .ko extension only if there's enough name left. */
279 m->kmod = !strncmp(ext, ".ko", 3);
283 if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
286 if (asprintf(&m->name, "%s", name) == -1)
290 strxfrchar(m->name, '-', '_');
293 if (alloc_ext && m->comp) {
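/*
 * When m->comp was set above, ext was rewound by three characters so it
 * points at ".ko.<suffix>"; ext + 4 therefore skips ".ko." and keeps only
 * the compression suffix (e.g. "gz").
 */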
294 m->ext = strdup(ext + 4);
296 free((void *) m->name);
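/*
 * Usage sketch (hypothetical path, assuming gzip support is compiled in):
 *
 *	struct kmod_path m;
 *
 *	if (__kmod_path__parse(&m, "/lib/modules/4.4.0/kernel/fs/ext4/ext4.ko.gz",
 *			       true, true) == 0) {
 *		// m.kmod == true, m.comp == true,
 *		// m.name == "[ext4]", m.ext == "gz"
 *		free((void *)m.name);
 *		free((void *)m.ext);
 *	}
 */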
305 * Global list of open DSOs and the counter.
307 static LIST_HEAD(dso__data_open);
308 static long dso__data_open_cnt;
309 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
311 static void dso__list_add(struct dso *dso)
313 list_add_tail(&dso->data.open_entry, &dso__data_open);
314 dso__data_open_cnt++;
317 static void dso__list_del(struct dso *dso)
319 list_del(&dso->data.open_entry);
320 WARN_ONCE(dso__data_open_cnt <= 0,
321 "DSO data fd counter out of bounds.");
322 dso__data_open_cnt--;
325 static void close_first_dso(void);
327 static int do_open(char *name)
330 char sbuf[STRERR_BUFSIZE];
333 fd = open(name, O_RDONLY);
337 pr_debug("dso open failed: %s\n",
338 str_error_r(errno, sbuf, sizeof(sbuf)));
339 if (!dso__data_open_cnt || errno != EMFILE)
348 static int __open_dso(struct dso *dso, struct machine *machine)
351 char *root_dir = (char *)"";
352 char *name = malloc(PATH_MAX);
358 root_dir = machine->root_dir;
360 if (dso__read_binary_type_filename(dso, dso->binary_type,
361 root_dir, name, PATH_MAX)) {
371 static void check_data_close(void);
374 * open_dso - Open DSO data file
377 * Open @dso's data file descriptor and update the
378 * list/count of open DSO objects.
380 static int open_dso(struct dso *dso, struct machine *machine)
382 int fd = __open_dso(dso, machine);
387 * Check if we crossed the allowed number
388 * of opened DSOs and close one if needed.
396 static void close_data_fd(struct dso *dso)
398 if (dso->data.fd >= 0) {
401 dso->data.file_size = 0;
407 * close_dso - Close DSO data file
410 * Close @dso's data file descriptor and update the
411 * list/count of open DSO objects.
413 static void close_dso(struct dso *dso)
418 static void close_first_dso(void)
422 dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
426 static rlim_t get_fd_limit(void)
431 /* Allow half of the current open fd limit. */
432 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
433 if (l.rlim_cur == RLIM_INFINITY)
436 limit = l.rlim_cur / 2;
438 pr_err("failed to get fd limit\n");
445 static rlim_t fd_limit;
448 * Used only by tests/dso-data.c to reset the environment
449 * for tests. I don't expect we should change this during runtime.
452 void reset_fd_limit(void)
457 static bool may_cache_fd(void)
460 fd_limit = get_fd_limit();
462 if (fd_limit == RLIM_INFINITY)
465 return fd_limit > (rlim_t) dso__data_open_cnt;
469 * Check and close the LRU dso if we crossed the allowed limit
470 * of open dso file descriptors. The limit is half of the
471 * RLIMIT_NOFILE soft limit.
473 static void check_data_close(void)
475 bool cache_fd = may_cache_fd();
482 * dso__data_close - Close DSO data file
485 * External interface to close @dso's data file descriptor.
487 void dso__data_close(struct dso *dso)
489 pthread_mutex_lock(&dso__data_open_lock);
491 pthread_mutex_unlock(&dso__data_open_lock);
494 static void try_to_open_dso(struct dso *dso, struct machine *machine)
496 enum dso_binary_type binary_type_data[] = {
497 DSO_BINARY_TYPE__BUILD_ID_CACHE,
498 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
499 DSO_BINARY_TYPE__NOT_FOUND,
503 if (dso->data.fd >= 0)
506 if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
507 dso->data.fd = open_dso(dso, machine);
512 dso->binary_type = binary_type_data[i++];
514 dso->data.fd = open_dso(dso, machine);
515 if (dso->data.fd >= 0)
518 } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
520 if (dso->data.fd >= 0)
521 dso->data.status = DSO_DATA_STATUS_OK;
523 dso->data.status = DSO_DATA_STATUS_ERROR;
527 * dso__data_get_fd - Get dso's data file descriptor
529 * @machine: machine object
531 * External interface to find dso's file, open it and
532 * return its file descriptor. It must be paired with
533 * dso__data_put_fd() if it returns a non-negative value.
535 int dso__data_get_fd(struct dso *dso, struct machine *machine)
537 if (dso->data.status == DSO_DATA_STATUS_ERROR)
540 if (pthread_mutex_lock(&dso__data_open_lock) < 0)
543 try_to_open_dso(dso, machine);
545 if (dso->data.fd < 0)
546 pthread_mutex_unlock(&dso__data_open_lock);
551 void dso__data_put_fd(struct dso *dso __maybe_unused)
553 pthread_mutex_unlock(&dso__data_open_lock);
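/*
 * Pairing sketch for the two interfaces above: the returned fd is only
 * guaranteed valid until dso__data_put_fd() drops dso__data_open_lock,
 * so keep the critical section short (hypothetical caller):
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		// read from fd while the lock is held
 *		dso__data_put_fd(dso);
 *	}
 */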
556 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
560 if (dso->data.status_seen & flag)
563 dso->data.status_seen |= flag;
569 dso_cache__free(struct dso *dso)
571 struct rb_root *root = &dso->data.cache;
572 struct rb_node *next = rb_first(root);
574 pthread_mutex_lock(&dso->lock);
576 struct dso_cache *cache;
578 cache = rb_entry(next, struct dso_cache, rb_node);
579 next = rb_next(&cache->rb_node);
580 rb_erase(&cache->rb_node, root);
583 pthread_mutex_unlock(&dso->lock);
586 static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
588 const struct rb_root *root = &dso->data.cache;
589 struct rb_node * const *p = &root->rb_node;
590 const struct rb_node *parent = NULL;
591 struct dso_cache *cache;
597 cache = rb_entry(parent, struct dso_cache, rb_node);
598 end = cache->offset + DSO__DATA_CACHE_SIZE;
600 if (offset < cache->offset)
602 else if (offset >= end)
611 static struct dso_cache *
612 dso_cache__insert(struct dso *dso, struct dso_cache *new)
614 struct rb_root *root = &dso->data.cache;
615 struct rb_node **p = &root->rb_node;
616 struct rb_node *parent = NULL;
617 struct dso_cache *cache;
618 u64 offset = new->offset;
620 pthread_mutex_lock(&dso->lock);
625 cache = rb_entry(parent, struct dso_cache, rb_node);
626 end = cache->offset + DSO__DATA_CACHE_SIZE;
628 if (offset < cache->offset)
630 else if (offset >= end)
636 rb_link_node(&new->rb_node, parent, p);
637 rb_insert_color(&new->rb_node, root);
641 pthread_mutex_unlock(&dso->lock);
646 dso_cache__memcpy(struct dso_cache *cache, u64 offset,
649 u64 cache_offset = offset - cache->offset;
650 u64 cache_size = min(cache->size - cache_offset, size);
652 memcpy(data, cache->data + cache_offset, cache_size);
657 dso_cache__read(struct dso *dso, struct machine *machine,
658 u64 offset, u8 *data, ssize_t size)
660 struct dso_cache *cache;
661 struct dso_cache *old;
667 cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
671 pthread_mutex_lock(&dso__data_open_lock);
674 * dso->data.fd might be closed if other thread opened another
675 * file (dso) due to open file limit (RLIMIT_NOFILE).
677 try_to_open_dso(dso, machine);
679 if (dso->data.fd < 0) {
681 dso->data.status = DSO_DATA_STATUS_ERROR;
685 cache_offset = offset & DSO__DATA_CACHE_MASK;
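/*
 * Worked example, assuming the usual DSO__DATA_CACHE_SIZE of 4096: a
 * request for offset 0x1234 is rounded down to cache_offset 0x1000 and a
 * whole chunk is read below, so neighbouring reads hit this cache entry.
 */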
687 ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
691 cache->offset = cache_offset;
695 pthread_mutex_unlock(&dso__data_open_lock);
698 old = dso_cache__insert(dso, cache);
700 /* we lose the race */
705 ret = dso_cache__memcpy(cache, offset, data, size);
714 static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
715 u64 offset, u8 *data, ssize_t size)
717 struct dso_cache *cache;
719 cache = dso_cache__find(dso, offset);
721 return dso_cache__memcpy(cache, offset, data, size);
723 return dso_cache__read(dso, machine, offset, data, size);
727 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
728 * in the rb_tree. Any read of already cached data is served from the cache.
731 static ssize_t cached_read(struct dso *dso, struct machine *machine,
732 u64 offset, u8 *data, ssize_t size)
740 ret = dso_cache_read(dso, machine, offset, p, size);
744 /* Reached EOF, return what we have. */
760 static int data_file_size(struct dso *dso, struct machine *machine)
764 char sbuf[STRERR_BUFSIZE];
766 if (dso->data.file_size)
769 if (dso->data.status == DSO_DATA_STATUS_ERROR)
772 pthread_mutex_lock(&dso__data_open_lock);
775 * dso->data.fd might be closed if other thread opened another
776 * file (dso) due to open file limit (RLIMIT_NOFILE).
778 try_to_open_dso(dso, machine);
780 if (dso->data.fd < 0) {
782 dso->data.status = DSO_DATA_STATUS_ERROR;
786 if (fstat(dso->data.fd, &st) < 0) {
788 pr_err("dso cache fstat failed: %s\n",
789 str_error_r(errno, sbuf, sizeof(sbuf)));
790 dso->data.status = DSO_DATA_STATUS_ERROR;
793 dso->data.file_size = st.st_size;
796 pthread_mutex_unlock(&dso__data_open_lock);
801 * dso__data_size - Return dso data size
803 * @machine: machine object
805 * Return: dso data size
807 off_t dso__data_size(struct dso *dso, struct machine *machine)
809 if (data_file_size(dso, machine))
812 /* For now just estimate dso data size is close to file size */
813 return dso->data.file_size;
816 static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
817 u64 offset, u8 *data, ssize_t size)
819 if (data_file_size(dso, machine))
822 /* Check the offset sanity. */
823 if (offset > dso->data.file_size)
826 if (offset + size < offset)
829 return cached_read(dso, machine, offset, data, size);
833 * dso__data_read_offset - Read data from dso file offset
835 * @machine: machine object
836 * @offset: file offset
837 * @data: buffer to store data
838 * @size: size of the @data buffer
840 * External interface to read data from dso file offset. Open
841 * dso data file and use cached_read to get the data.
843 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
844 u64 offset, u8 *data, ssize_t size)
846 if (dso->data.status == DSO_DATA_STATUS_ERROR)
849 return data_read_offset(dso, machine, offset, data, size);
853 * dso__data_read_addr - Read data from dso address
855 * @machine: machine object
856 * @addr: virtual memory address
857 * @data: buffer to store data
858 * @size: size of the @data buffer
860 * External interface to read data from dso address.
862 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
863 struct machine *machine, u64 addr,
864 u8 *data, ssize_t size)
866 u64 offset = map->map_ip(map, addr);
867 return dso__data_read_offset(dso, machine, offset, data, size);
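/*
 * Usage sketch (hypothetical caller with @map, @machine and @addr already
 * resolved from a sample):
 *
 *	u8 buf[16];
 *	ssize_t n = dso__data_read_addr(map->dso, map, machine, addr,
 *					buf, sizeof(buf));
 *
 *	if (n > 0)
 *		hexdump(buf, n);	// hypothetical helper
 */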
870 struct map *dso__new_map(const char *name)
872 struct map *map = NULL;
873 struct dso *dso = dso__new(name);
876 map = map__new2(0, dso, MAP__FUNCTION);
881 struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
882 const char *short_name, int dso_type)
885 * The kernel dso could be created by build_id processing.
887 struct dso *dso = machine__findnew_dso(machine, name);
890 * We need to run this in all cases, since during the build_id
891 * processing we had no idea this was the kernel dso.
894 dso__set_short_name(dso, short_name, false);
895 dso->kernel = dso_type;
902 * Find a matching entry and/or link current entry to RB tree.
903 * Either the dso or the name parameter must be non-NULL or the
904 * function will not work.
906 static struct dso *__dso__findlink_by_longname(struct rb_root *root,
907 struct dso *dso, const char *name)
909 struct rb_node **p = &root->rb_node;
910 struct rb_node *parent = NULL;
913 name = dso->long_name;
915 * Find node with the matching name
918 struct dso *this = rb_entry(*p, struct dso, rb_node);
919 int rc = strcmp(name, this->long_name);
924 * In case the new DSO is a duplicate of an existing
925 * one, print a one-time warning & put the new entry
926 * at the end of the list of duplicates.
928 if (!dso || (dso == this))
929 return this; /* Found matching dso */
931 * The core kernel DSOs may have duplicated long names.
932 * In this case the short names should differ, so compare
933 * the short names to differentiate the DSOs.
935 rc = strcmp(dso->short_name, this->short_name);
937 pr_err("Duplicated dso name: %s\n", name);
942 p = &parent->rb_left;
944 p = &parent->rb_right;
947 /* Add new node and rebalance tree */
948 rb_link_node(&dso->rb_node, parent, p);
949 rb_insert_color(&dso->rb_node, root);
955 static inline struct dso *__dso__find_by_longname(struct rb_root *root,
958 return __dso__findlink_by_longname(root, NULL, name);
961 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
963 struct rb_root *root = dso->root;
968 if (dso->long_name_allocated)
969 free((char *)dso->long_name);
972 rb_erase(&dso->rb_node, root);
974 * __dso__findlink_by_longname() isn't guaranteed to add it
975 * back, so a clean removal is required here.
977 RB_CLEAR_NODE(&dso->rb_node);
981 dso->long_name = name;
982 dso->long_name_len = strlen(name);
983 dso->long_name_allocated = name_allocated;
986 __dso__findlink_by_longname(root, dso, NULL);
989 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
994 if (dso->short_name_allocated)
995 free((char *)dso->short_name);
997 dso->short_name = name;
998 dso->short_name_len = strlen(name);
999 dso->short_name_allocated = name_allocated;
1002 static void dso__set_basename(struct dso *dso)
1005 * basename() may modify the path buffer, so we must pass a copy.
1008 char *base, *lname = strdup(dso->long_name);
1014 * basename() may return a pointer to internal
1015 * storage which is reused in subsequent calls
1016 * so copy the result.
1018 base = strdup(basename(lname));
1025 dso__set_short_name(dso, base, true);
1028 int dso__name_len(const struct dso *dso)
1031 return strlen("[unknown]");
1033 return dso->long_name_len;
1035 return dso->short_name_len;
1038 bool dso__loaded(const struct dso *dso, enum map_type type)
1040 return dso->loaded & (1 << type);
1043 bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
1045 return dso->sorted_by_name & (1 << type);
1048 void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
1050 dso->sorted_by_name |= (1 << type);
1053 struct dso *dso__new(const char *name)
1055 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1059 strcpy(dso->name, name);
1060 dso__set_long_name(dso, dso->name, false);
1061 dso__set_short_name(dso, dso->name, false);
1062 for (i = 0; i < MAP__NR_TYPES; ++i)
1063 dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
1064 dso->data.cache = RB_ROOT;
1066 dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1067 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1068 dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1069 dso->is_64_bit = (sizeof(void *) == 8);
1072 dso->sorted_by_name = 0;
1073 dso->has_build_id = 0;
1074 dso->has_srcline = 1;
1076 dso->kernel = DSO_TYPE_USER;
1077 dso->needs_swap = DSO_SWAP__UNSET;
1078 RB_CLEAR_NODE(&dso->rb_node);
1080 INIT_LIST_HEAD(&dso->node);
1081 INIT_LIST_HEAD(&dso->data.open_entry);
1082 pthread_mutex_init(&dso->lock, NULL);
1083 atomic_set(&dso->refcnt, 1);
1089 void dso__delete(struct dso *dso)
1093 if (!RB_EMPTY_NODE(&dso->rb_node))
1094 pr_err("DSO %s is still in rbtree when being deleted!\n",
1096 for (i = 0; i < MAP__NR_TYPES; ++i)
1097 symbols__delete(&dso->symbols[i]);
1099 if (dso->short_name_allocated) {
1100 zfree((char **)&dso->short_name);
1101 dso->short_name_allocated = false;
1104 if (dso->long_name_allocated) {
1105 zfree((char **)&dso->long_name);
1106 dso->long_name_allocated = false;
1109 dso__data_close(dso);
1110 auxtrace_cache__free(dso->auxtrace_cache);
1111 dso_cache__free(dso);
1113 zfree(&dso->symsrc_filename);
1114 pthread_mutex_destroy(&dso->lock);
1118 struct dso *dso__get(struct dso *dso)
1121 atomic_inc(&dso->refcnt);
1125 void dso__put(struct dso *dso)
1127 if (dso && atomic_dec_and_test(&dso->refcnt))
1131 void dso__set_build_id(struct dso *dso, void *build_id)
1133 memcpy(dso->build_id, build_id, sizeof(dso->build_id));
1134 dso->has_build_id = 1;
1137 bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1139 return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1142 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1144 char path[PATH_MAX];
1146 if (machine__is_default_guest(machine))
1148 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1149 if (sysfs__read_build_id(path, dso->build_id,
1150 sizeof(dso->build_id)) == 0)
1151 dso->has_build_id = true;
1154 int dso__kernel_module_get_build_id(struct dso *dso,
1155 const char *root_dir)
1157 char filename[PATH_MAX];
1159 * kernel module short names are of the form "[module]" and
1160 * we need just "module" here.
1162 const char *name = dso->short_name + 1;
1164 snprintf(filename, sizeof(filename),
1165 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1166 root_dir, (int)strlen(name) - 1, name);
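/*
 * E.g. a dso with short_name "[ext4]" and an empty root_dir probes
 * "/sys/module/ext4/notes/.note.gnu.build-id".
 */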
1168 if (sysfs__read_build_id(filename, dso->build_id,
1169 sizeof(dso->build_id)) == 0)
1170 dso->has_build_id = true;
1175 bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1177 bool have_build_id = false;
1180 list_for_each_entry(pos, head, node) {
1181 if (with_hits && !pos->hit && !dso__is_vdso(pos))
1183 if (pos->has_build_id) {
1184 have_build_id = true;
1187 if (filename__read_build_id(pos->long_name, pos->build_id,
1188 sizeof(pos->build_id)) > 0) {
1189 have_build_id = true;
1190 pos->has_build_id = true;
1194 return have_build_id;
1197 void __dsos__add(struct dsos *dsos, struct dso *dso)
1199 list_add_tail(&dso->node, &dsos->head);
1200 __dso__findlink_by_longname(&dsos->root, dso, NULL);
1202 * It is now in the linked list, so grab a reference. Then, when memory
1203 * is needed, garbage collect by looking for LRU dso instances in the
1204 * list with atomic_read(&dso->refcnt) == 1, i.e. no references held
1205 * anywhere besides the one for the list itself. For those, under the
1206 * list lock, remove the entry from the list and dso__put() it; that is
1207 * probably the last reference and ends its life via dso__delete().
1209 * That, or at the end of the 'struct machine' lifetime, when all
1210 * 'struct dso' instances will be removed from the list, in
1211 * dsos__exit(), if they have no other reference from some other data
1214 * E.g.: after processing a 'perf.data' file and storing references
1215 * to objects instantiated while processing events, we will have
1216 * references to the 'thread', 'map', 'dso' structs all from 'struct
1217 * hist_entry' instances, but we may not need anything not referenced,
1218 * so we might as well call machines__exit()/machines__delete() and
1219 * garbage collect it.
1224 void dsos__add(struct dsos *dsos, struct dso *dso)
1226 pthread_rwlock_wrlock(&dsos->lock);
1227 __dsos__add(dsos, dso);
1228 pthread_rwlock_unlock(&dsos->lock);
1231 struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1236 list_for_each_entry(pos, &dsos->head, node)
1237 if (strcmp(pos->short_name, name) == 0)
1241 return __dso__find_by_longname(&dsos->root, name);
1244 struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1247 pthread_rwlock_rdlock(&dsos->lock);
1248 dso = __dsos__find(dsos, name, cmp_short);
1249 pthread_rwlock_unlock(&dsos->lock);
1253 struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
1255 struct dso *dso = dso__new(name);
1258 __dsos__add(dsos, dso);
1259 dso__set_basename(dso);
1260 /* Put dso here because __dsos__add already got it */
1266 struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
1268 struct dso *dso = __dsos__find(dsos, name, false);
1270 return dso ? dso : __dsos__addnew(dsos, name);
1273 struct dso *dsos__findnew(struct dsos *dsos, const char *name)
1276 pthread_rwlock_wrlock(&dsos->lock);
1277 dso = dso__get(__dsos__findnew(dsos, name));
1278 pthread_rwlock_unlock(&dsos->lock);
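/*
 * Note: dsos__findnew() hands back a reference taken via dso__get(), so
 * callers must drop it with dso__put() when they are done with the dso.
 */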
1282 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
1283 bool (skip)(struct dso *dso, int parm), int parm)
1288 list_for_each_entry(pos, head, node) {
1289 if (skip && skip(pos, parm))
1291 ret += dso__fprintf_buildid(pos, fp);
1292 ret += fprintf(fp, " %s\n", pos->long_name);
1297 size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1302 list_for_each_entry(pos, head, node) {
1304 for (i = 0; i < MAP__NR_TYPES; ++i)
1305 ret += dso__fprintf(pos, i, fp);
1311 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1313 char sbuild_id[SBUILD_ID_SIZE];
1315 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1316 return fprintf(fp, "%s", sbuild_id);
1319 size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
1322 size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1324 if (dso->short_name != dso->long_name)
1325 ret += fprintf(fp, "%s, ", dso->long_name);
1326 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
1327 dso__loaded(dso, type) ? "" : "NOT ");
1328 ret += dso__fprintf_buildid(dso, fp);
1329 ret += fprintf(fp, ")\n");
1330 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
1331 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1332 ret += symbol__fprintf(pos, fp);
1338 enum dso_type dso__type(struct dso *dso, struct machine *machine)
1341 enum dso_type type = DSO__TYPE_UNKNOWN;
1343 fd = dso__data_get_fd(dso, machine);
1345 type = dso__type_fd(fd);
1346 dso__data_put_fd(dso);
1352 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1354 int idx, errnum = dso->load_errno;
1356 * This must have the same ordering as enum dso_load_errno.
1358 static const char *dso_load__error_str[] = {
1359 "Internal tools/perf/ library error",
1361 "Can not read build id",
1362 "Mismatching build id",
1363 "Decompression failure",
1366 BUG_ON(buflen == 0);
1369 const char *err = str_error_r(errnum, buf, buflen);
1372 scnprintf(buf, buflen, "%s", err);
1377 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1380 idx = errnum - __DSO_LOAD_ERRNO__START;
1381 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);