/* Global rb-tree holding every known thread, keyed by pid (see threads__findnew). */
9 static struct rb_root threads;
/* Most recently looked-up thread; front-end cache for threads__findnew(). */
10 static struct thread *last_match;
/*
 * Allocate and minimally initialize a struct thread for @pid.
 * The comm defaults to ":<pid>" until thread__set_comm() replaces it.
 * NOTE(review): this view of the file is fragmentary -- NULL checks for
 * zalloc()/malloc() and the return statement are in elided lines; confirm
 * the allocation failures are handled there.
 */
12 static struct thread *thread__new(pid_t pid)
14 struct thread *self = zalloc(sizeof(*self));
/* 32 bytes is enough for ":" plus any decimal pid. */
18 self->comm = malloc(32);
20 snprintf(self->comm, 32, ":%d", self->pid);
/* Maps evicted by overlaps are parked on this list, not freed. */
22 INIT_LIST_HEAD(&self->removed_maps);
/*
 * Replace the thread's command name with a private copy of @comm.
 * Returns 0 on success, -ENOMEM if strdup() fails.
 * NOTE(review): the previous self->comm is not freed in the visible lines --
 * confirm an elided line frees it, otherwise each rename leaks the old name.
 */
28 int thread__set_comm(struct thread *self, const char *comm)
32 self->comm = strdup(comm);
33 return self->comm ? 0 : -ENOMEM;
/*
 * Return the length of the thread's comm, computed lazily on first call
 * and cached in self->comm_len thereafter.
 * NOTE(review): elided lines inside the if-block may guard against a NULL
 * self->comm before the strlen() -- confirm.
 */
36 int thread__comm_len(struct thread *self)
38 if (!self->comm_len) {
41 self->comm_len = strlen(self->comm);
44 return self->comm_len;
/*
 * Print the thread's pid/comm header followed by its current maps and then
 * the maps previously removed due to overlaps; returns the total number of
 * characters written to @fp.
 * NOTE(review): this view of the file is fragmentary -- the declarations of
 * `pos`/`nd`, closing braces and the final return are in elided lines.
 */
47 static size_t thread__fprintf(struct thread *self, FILE *fp)
51 size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
52 self->pid, self->comm);
54 for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
55 pos = rb_entry(nd, struct map, rb_node);
56 ret += map__fprintf(pos, fp);
/*
 * Accumulate rather than overwrite: the original `ret =` here discarded
 * the byte count already gathered for the "Current maps" section, so the
 * function under-reported how much it wrote.
 */
59 ret += fprintf(fp, "Removed maps:\n");
61 list_for_each_entry(pos, &self->removed_maps, node)
62 ret += map__fprintf(pos, fp);
/*
 * Look up the thread for @pid in the global `threads` rb-tree, creating and
 * inserting a new one if it does not exist yet.
 * NOTE(review): fragmentary view -- the rb-tree descent (pid comparisons,
 * the cache-hit return and the NULL check on thread__new()) is in elided
 * lines; returns NULL presumably only when thread__new() fails.
 */
67 struct thread *threads__findnew(pid_t pid)
69 struct rb_node **p = &threads.rb_node;
70 struct rb_node *parent = NULL;
/*
 * Front-end cache - PID lookups come in blocks,
 * so most of the time we don't have to look up
 */
78 if (last_match && last_match->pid == pid)
83 th = rb_entry(parent, struct thread, rb_node);
/* Not found: allocate a new thread and link it at the found slot. */
96 th = thread__new(pid);
98 rb_link_node(&th->rb_node, parent, p);
99 rb_insert_color(&th->rb_node, &threads);
/*
 * Create (or find) the pid-0 "swapper" idle thread and name it.
 * NOTE(review): the error-path return and the success return are in elided
 * lines -- presumably NULL on failure, the thread otherwise; confirm.
 */
106 struct thread *register_idle_thread(void)
108 struct thread *thread = threads__findnew(0);
/* thread__set_comm() returns non-zero (-ENOMEM) on failure. */
110 if (!thread || thread__set_comm(thread, "swapper")) {
111 fprintf(stderr, "problem inserting idle task.\n");
/*
 * Walk the thread's map tree and evict every existing map that overlaps
 * @map (as decided by map__overlap()).  Evicted maps are unlinked from the
 * rb-tree but NOT freed: they are appended to self->removed_maps because
 * other objects may still reference them.
 * NOTE(review): fragmentary view -- the loop construct around the body and
 * any truncation/adjustment of partially-overlapping maps is in elided lines.
 */
118 static void thread__remove_overlappings(struct thread *self, struct map *map)
120 struct rb_node *next = rb_first(&self->maps);
123 struct map *pos = rb_entry(next, struct map, rb_node);
/* Advance before a possible rb_erase() invalidates pos's node. */
124 next = rb_next(&pos->rb_node);
126 if (!map__overlap(pos, map))
/* Diagnostic dump of both conflicting maps. */
130 fputs("overlapping maps:\n", stderr);
131 map__fprintf(map, stderr);
132 map__fprintf(pos, stderr);
135 rb_erase(&pos->rb_node, &self->maps);
/*
 * We may have references to this map, for instance in some
 * hist_entry instances, so just move them to a separate
 */
141 list_add_tail(&pos->node, &self->removed_maps);
/*
 * Insert @map into the rb-tree @maps, ordered by map start address.
 * NOTE(review): fragmentary view -- the descent loop comparing `ip` against
 * each node's start to choose rb_left/rb_right is in elided lines.
 */
145 void maps__insert(struct rb_root *maps, struct map *map)
147 struct rb_node **p = &maps->rb_node;
148 struct rb_node *parent = NULL;
/* Key: the new map's start address. */
149 const u64 ip = map->start;
154 m = rb_entry(parent, struct map, rb_node);
/* Link at the slot found by the descent, then rebalance. */
161 rb_link_node(&map->rb_node, parent, p);
162 rb_insert_color(&map->rb_node, maps);
/*
 * Binary-search @maps for the map whose [start, end] range contains @ip.
 * NOTE(review): fragmentary view -- the `ip < m->start` branch, the match
 * return and the not-found NULL return are in elided lines; visible here is
 * only the right-descend condition (`ip > m->end`).
 */
165 struct map *maps__find(struct rb_root *maps, u64 ip)
167 struct rb_node **p = &maps->rb_node;
168 struct rb_node *parent = NULL;
173 m = rb_entry(parent, struct map, rb_node);
176 else if (ip > m->end)
/*
 * Insert @map into the thread's address space: first evict any existing
 * maps that overlap it (parking them on self->removed_maps), then add it
 * to the thread's map rb-tree.
 */
185 void thread__insert_map(struct thread *self, struct map *map)
187 thread__remove_overlappings(self, map);
188 maps__insert(&self->maps, map);
/*
 * Initialize @self as a fork-child of @parent: copy the parent's comm and
 * clone every map in the parent's address space into the child's.
 * NOTE(review): fragmentary view -- the return statements and the checks on
 * strdup()/map__clone() failure are in elided lines; presumably returns 0
 * on success, negative on allocation failure.  Confirm the old self->comm
 * is released before being overwritten.
 */
191 int thread__fork(struct thread *self, struct thread *parent)
197 self->comm = strdup(parent->comm);
/* Deep-copy the parent's maps; insertion handles overlap eviction. */
201 for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
202 struct map *map = rb_entry(nd, struct map, rb_node);
203 struct map *new = map__clone(map);
206 thread__insert_map(self, new);
/*
 * Dump every registered thread (and its maps) to @fp by walking the global
 * `threads` rb-tree; returns the accumulated character count.
 * NOTE(review): the declaration/initialization of `ret` and the final
 * return are in elided lines.
 */
212 size_t threads__fprintf(FILE *fp)
217 for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
218 struct thread *pos = rb_entry(nd, struct thread, rb_node);
220 ret += thread__fprintf(pos, fp);