/*
 * thread__new - allocate and minimally initialize a thread object for @pid.
 * Visible code: zero-filled allocation, map-groups init, and a ":<pid>"
 * placeholder comm string.
 * NOTE(review): interior lines are elided in this excerpt — presumably the
 * full function NULL-checks zalloc()/malloc() and sets self->pid before the
 * snprintf; verify against the complete file.
 */
10 static struct thread *thread__new(pid_t pid)
12 struct thread *self = zalloc(sizeof(*self)); /* zero-filled, so flags like comm_set start false */
15 map_groups__init(&self->mg);
17 self->comm = malloc(32); /* small buffer for the placeholder name */
19 snprintf(self->comm, 32, ":%d", self->pid); /* ":<pid>" until a real comm is learned */
/*
 * thread__delete - tear down @self.  Visible code releases the map groups;
 * the comm string and the thread struct itself are presumably freed in
 * lines elided from this excerpt — confirm no leak in the full file.
 */
25 void thread__delete(struct thread *self)
27 map_groups__exit(&self->mg);
/*
 * thread__set_comm - replace the thread's command name with a heap copy
 * of @comm.  Returns 0 on success, -ENOMEM if strdup() fails.
 * NOTE(review): elided lines presumably free the previous self->comm and
 * gate the comm_set assignment on !err — as shown here comm_set would be
 * set even on strdup failure; verify against the full file.
 */
32 int thread__set_comm(struct thread *self, const char *comm)
38 self->comm = strdup(comm);
39 err = self->comm == NULL ? -ENOMEM : 0;
41 self->comm_set = true; /* distinguishes a real comm from the ":<pid>" placeholder */
/*
 * thread__comm_len - return strlen(self->comm), computed lazily on first
 * call and cached in self->comm_len thereafter.
 * NOTE(review): a NULL check on self->comm is presumably in the elided
 * lines inside the if-block — strlen(NULL) would be undefined behavior.
 */
46 int thread__comm_len(struct thread *self)
48 if (!self->comm_len) { /* zero means "not computed yet" */
51 self->comm_len = strlen(self->comm);
54 return self->comm_len;
/*
 * thread__fprintf - print "Thread <pid> <comm>" followed by the thread's
 * map groups to @fp; returns the total number of characters printed.
 * 'verbose' is a file-scope flag declared outside this excerpt.
 */
57 static size_t thread__fprintf(struct thread *self, FILE *fp)
59 return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
60 map_groups__fprintf(&self->mg, verbose, fp);
/*
 * machine__findnew_thread - look up @pid in the machine's thread rbtree,
 * creating and inserting a new thread when it is not found.  A one-entry
 * last_match cache short-circuits repeated lookups for the same pid.
 * NOTE(review): the rbtree descent loop and (presumably) a NULL check on
 * thread__new() are in lines elided from this excerpt — verify the error
 * path in the full file.
 */
63 struct thread *machine__findnew_thread(struct machine *self, pid_t pid)
65 struct rb_node **p = &self->threads.rb_node;
66 struct rb_node *parent = NULL;
70 * Front-end cache - PID lookups come in blocks,
71 * so most of the time we don't have to look up
74 if (self->last_match && self->last_match->pid == pid)
75 return self->last_match; /* cache hit: skip the tree walk entirely */
79 th = rb_entry(parent, struct thread, rb_node);
82 self->last_match = th; /* found in tree: refresh the cache */
92 th = thread__new(pid); /* not found: allocate a fresh thread */
94 rb_link_node(&th->rb_node, parent, p);
95 rb_insert_color(&th->rb_node, &self->threads); /* rebalance after insert */
96 self->last_match = th;
/*
 * thread__insert_map - add @map to the thread's map groups, first fixing
 * up any existing maps that overlap the new one (diagnostics go to stderr
 * when 'verbose' — a file-scope flag outside this excerpt — is set).
 */
102 void thread__insert_map(struct thread *self, struct map *map)
104 map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
105 map_groups__insert(&self->mg, map);
/*
 * thread__fork - inherit state from @parent at fork time: duplicate the
 * parent's comm (only if it was explicitly set) and clone every map group
 * type into the child.  Returns 0 on success, negative on failure.
 * NOTE(review): the strdup() NULL check, the free of the child's old comm,
 * and the failure return inside the loop are presumably in lines elided
 * from this excerpt — verify against the full file.
 */
108 int thread__fork(struct thread *self, struct thread *parent)
112 if (parent->comm_set) {
115 self->comm = strdup(parent->comm);
118 self->comm_set = true; /* child's comm is now a real name, not the placeholder */
121 for (i = 0; i < MAP__NR_TYPES; ++i)
122 if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
/*
 * machine__fprintf - print every thread in the machine's rbtree via
 * thread__fprintf(), accumulating the character count in 'ret'
 * (declared in an elided line).  Definition continues past this excerpt;
 * presumably it returns ret after the loop.
 */
127 size_t machine__fprintf(struct machine *machine, FILE *fp)
132 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { /* in-order rbtree walk */
133 struct thread *pos = rb_entry(nd, struct thread, rb_node);
135 ret += thread__fprintf(pos, fp);