/*
 * find_all_tid - collect the tid of every task belonging to a process.
 *
 * Scans /proc/<pid>/task and parses each directory entry name as a
 * decimal tid into a freshly malloc'd array returned via *all_tid.
 * NOTE(review): this extract is elided -- local declarations, error
 * checks and the return value are not visible here.
 */
10 int find_all_tid(int pid, pid_t ** all_tid)
14 struct dirent **namelist = NULL;
/* Build the /proc path to the process' task directory. */
18 sprintf(name, "/proc/%d/task", pid);
19 items = scandir(name, &namelist, NULL, NULL);
/* NOTE(review): malloc() result is not checked in the visible lines. */
22 *all_tid = malloc(sizeof(pid_t) * items);
/* Each directory entry name is a decimal tid. */
28 for (i = 0; i < items; i++)
29 (*all_tid)[i] = atoi(namelist[i]->d_name);
/* Presumably releases the scandir() entries -- loop body elided. */
34 for (i=0; i<items; i++)
/*
 * map_groups__init - initialize a struct map_groups.
 *
 * Sets each per-type rb-tree of maps to an empty root and initializes
 * the matching removed_maps list head for that type.
 */
41 void map_groups__init(struct map_groups *self)
44 for (i = 0; i < MAP__NR_TYPES; ++i) {
45 self->maps[i] = RB_ROOT;
46 INIT_LIST_HEAD(&self->removed_maps[i]);
/*
 * thread__new - allocate and initialize a thread object for @pid.
 *
 * Zero-allocates the thread, initializes its map groups, and gives it
 * a placeholder comm of ":<pid>" until a real comm is learned.
 * NOTE(review): the malloc() for self->comm is not checked in the
 * visible lines.
 */
50 static struct thread *thread__new(pid_t pid)
52 struct thread *self = zalloc(sizeof(*self));
55 map_groups__init(&self->mg);
/* 32 bytes comfortably holds ":" plus any decimal pid. */
57 self->comm = malloc(32);
59 snprintf(self->comm, 32, ":%d", self->pid);
/*
 * map_groups__flush - move every map out of the active rb-trees.
 *
 * Walks each per-type tree, erasing every map and appending it to the
 * removed_maps list for its type instead of freeing it, because other
 * objects may still hold references to the maps.
 */
65 static void map_groups__flush(struct map_groups *self)
69 for (type = 0; type < MAP__NR_TYPES; type++) {
70 struct rb_root *root = &self->maps[type];
71 struct rb_node *next = rb_first(root);
74 struct map *pos = rb_entry(next, struct map, rb_node);
/* Grab the successor before rb_erase() invalidates this node. */
75 next = rb_next(&pos->rb_node);
76 rb_erase(&pos->rb_node, root);
78 * We may have references to this map, for
79 * instance in some hist_entry instances, so
80 * just move them to a separate list.
82 list_add_tail(&pos->node, &self->removed_maps[pos->type]);
/*
 * thread__set_comm - set the thread's command name.
 *
 * Duplicates @comm; err is -ENOMEM when strdup() fails, 0 otherwise.
 * On the visible success path the comm_set flag is raised and the
 * thread's maps are flushed -- presumably because a comm change
 * (e.g. after exec) invalidates the old address space; confirm against
 * callers.
 */
87 int thread__set_comm(struct thread *self, const char *comm)
93 self->comm = strdup(comm);
94 err = self->comm == NULL ? -ENOMEM : 0;
96 self->comm_set = true;
97 map_groups__flush(&self->mg);
/*
 * thread__comm_len - return the length of the thread's comm.
 *
 * Computes strlen(self->comm) lazily on first use and caches the
 * result in self->comm_len for subsequent calls.
 */
102 int thread__comm_len(struct thread *self)
104 if (!self->comm_len) {
107 self->comm_len = strlen(self->comm);
110 return self->comm_len;
/*
 * __map_groups__fprintf_maps - print all active maps of one type.
 *
 * Emits a "<type name>:" header, then one "Map:" entry per map in the
 * rb-tree (including its dso details), returning the number of
 * characters printed.
 */
113 size_t __map_groups__fprintf_maps(struct map_groups *self,
114 enum map_type type, FILE *fp)
116 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
119 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
120 struct map *pos = rb_entry(nd, struct map, rb_node);
121 printed += fprintf(fp, "Map:");
122 printed += map__fprintf(pos, fp);
/* NOTE(review): likely guarded by a verbose check in elided lines. */
124 printed += dso__fprintf(pos->dso, type, fp);
125 printed += fprintf(fp, "--\n");
/*
 * map_groups__fprintf_maps - print the active maps of every type.
 * Returns the total number of characters printed.
 */
132 size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
134 size_t printed = 0, i;
135 for (i = 0; i < MAP__NR_TYPES; ++i)
136 printed += __map_groups__fprintf_maps(self, i, fp);
/*
 * __map_groups__fprintf_removed_maps - print the removed maps of one
 * type (maps evicted by overlap fixup or a flush but still kept alive
 * for outstanding references). Returns characters printed.
 */
140 static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
141 enum map_type type, FILE *fp)
146 list_for_each_entry(pos, &self->removed_maps[type], node) {
147 printed += fprintf(fp, "Map:");
148 printed += map__fprintf(pos, fp);
/* NOTE(review): likely guarded by a verbose check in elided lines. */
150 printed += dso__fprintf(pos->dso, type, fp);
151 printed += fprintf(fp, "--\n");
/*
 * map_groups__fprintf_removed_maps - print the removed maps of every
 * type. Returns the total number of characters printed.
 */
157 static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
159 size_t printed = 0, i;
160 for (i = 0; i < MAP__NR_TYPES; ++i)
161 printed += __map_groups__fprintf_removed_maps(self, i, fp);
/*
 * map_groups__fprintf - print the active maps, then a "Removed maps:"
 * section with the evicted ones. Returns total characters printed.
 */
165 static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
167 size_t printed = map_groups__fprintf_maps(self, fp);
168 printed += fprintf(fp, "Removed maps:\n");
169 return printed + map_groups__fprintf_removed_maps(self, fp);
/*
 * thread__fprintf - print a one-line thread header (pid and comm)
 * followed by all of the thread's map groups. Returns characters
 * printed.
 */
172 static size_t thread__fprintf(struct thread *self, FILE *fp)
174 return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
175 map_groups__fprintf(&self->mg, fp);
/*
 * perf_session__findnew - look up the thread for @pid, creating and
 * inserting a new one if it does not exist yet.
 *
 * Threads live in an rb-tree keyed by pid; a one-entry last_match
 * cache short-circuits the common case of repeated lookups for the
 * same pid.
 */
178 struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
180 struct rb_node **p = &self->threads.rb_node;
181 struct rb_node *parent = NULL;
185 * Front-end cache - PID lookups come in blocks,
186 * so most of the time we don't have to look up
/* Fast path: same pid as the previous lookup. */
189 if (self->last_match && self->last_match->pid == pid)
190 return self->last_match;
/* rb-tree descent (loop header elided from this view). */
194 th = rb_entry(parent, struct thread, rb_node);
196 if (th->pid == pid) {
197 self->last_match = th;
/* Not found: create a new thread and link it into the tree. */
207 th = thread__new(pid);
209 rb_link_node(&th->rb_node, parent, p);
210 rb_insert_color(&th->rb_node, &self->threads);
211 self->last_match = th;
/*
 * map_groups__fixup_overlappings - resolve overlaps with an incoming map.
 *
 * Walks the rb-tree for @map's type; every existing map that overlaps
 * the new one is erased from the tree and parked on the removed_maps
 * list (other objects may still reference it). When the old map
 * extends past the new one on either side, clones are created to keep
 * the non-overlapped head and/or tail ranges mapped.
 * NOTE(review): the error paths for map__clone() are elided here.
 */
217 static int map_groups__fixup_overlappings(struct map_groups *self,
220 struct rb_root *root = &self->maps[map->type];
221 struct rb_node *next = rb_first(root);
224 struct map *pos = rb_entry(next, struct map, rb_node);
/* Grab the successor before any rb_erase() below. */
225 next = rb_next(&pos->rb_node);
227 if (!map__overlap(pos, map))
/* NOTE(review): likely only under verbose mode -- guard elided. */
231 fputs("overlapping maps:\n", stderr);
232 map__fprintf(map, stderr);
233 map__fprintf(pos, stderr);
236 rb_erase(&pos->rb_node, root);
238 * We may have references to this map, for instance in some
239 * hist_entry instances, so just move them to a separate
242 list_add_tail(&pos->node, &self->removed_maps[map->type]);
244 * Now check if we need to create new maps for areas not
245 * overlapped by the new map:
/* Old map starts before the new one: keep the leading piece. */
247 if (map->start > pos->start) {
248 struct map *before = map__clone(pos);
253 before->end = map->start - 1;
254 map_groups__insert(self, before);
256 map__fprintf(before, stderr);
/* Old map ends after the new one: keep the trailing piece. */
259 if (map->end < pos->end) {
260 struct map *after = map__clone(pos);
265 after->start = map->end + 1;
266 map_groups__insert(self, after);
268 map__fprintf(after, stderr);
/*
 * thread__insert_map - add @map to the thread's address space, first
 * evicting/splitting any existing maps it overlaps.
 * NOTE(review): the int result of map_groups__fixup_overlappings() is
 * ignored here -- confirm that is intentional.
 */
275 void thread__insert_map(struct thread *self, struct map *map)
277 map_groups__fixup_overlappings(&self->mg, map);
278 map_groups__insert(&self->mg, map);
282 * XXX This should not really _copy_ the maps, but refcount them.
/*
 * map_groups__clone - copy every map of one type from @parent into
 * @self, cloning and inserting each map individually (used at fork).
 * NOTE(review): the clone-failure return path is elided from this view.
 */
284 static int map_groups__clone(struct map_groups *self,
285 struct map_groups *parent, enum map_type type)
288 for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
289 struct map *map = rb_entry(nd, struct map, rb_node);
290 struct map *new = map__clone(map);
293 map_groups__insert(self, new);
/*
 * thread__fork - inherit state from @parent into a newly forked thread.
 *
 * Copies the parent's comm (when one was set) and clones each map
 * group type from the parent; a negative map_groups__clone() result
 * aborts via the elided error path.
 */
298 int thread__fork(struct thread *self, struct thread *parent)
302 if (parent->comm_set) {
/* NOTE(review): strdup() failure handling is elided from this view. */
305 self->comm = strdup(parent->comm);
308 self->comm_set = true;
311 for (i = 0; i < MAP__NR_TYPES; ++i)
312 if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
/*
 * perf_session__fprintf - print every thread in the session.
 *
 * Walks the session's thread rb-tree, printing each thread and its
 * maps while accumulating the character count (function continues
 * past this view).
 */
317 size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
322 for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
323 struct thread *pos = rb_entry(nd, struct thread, rb_node);
325 ret += thread__fprintf(pos, fp);