perf symbols: Move map related routines to map.c
tools/perf/util/thread.c
#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"

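/*
 * Scan /proc/<pid>/task and store the tids found there in a malloc'ed
 * array at *all_tid.  Returns the number of entries on success or a
 * negative errno; the caller owns the array.
 */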
int find_all_tid(int pid, pid_t **all_tid)
{
        char name[256];
        int items;
        struct dirent **namelist = NULL;
        int ret = 0;
        int i;

        sprintf(name, "/proc/%d/task", pid);
        items = scandir(name, &namelist, NULL, NULL);
        if (items <= 0)
                return -ENOENT;
        *all_tid = malloc(sizeof(pid_t) * items);
        if (!*all_tid) {
                ret = -ENOMEM;
                goto failure;
        }

        for (i = 0; i < items; i++)
                (*all_tid)[i] = atoi(namelist[i]->d_name);

        ret = items;

failure:
        for (i = 0; i < items; i++)
                free(namelist[i]);
        free(namelist);

        return ret;
}

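/*
 * Initialise the per-type rbtree of maps and the list of removed maps.
 */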
void map_groups__init(struct map_groups *self)
{
        int i;
        for (i = 0; i < MAP__NR_TYPES; ++i) {
                self->maps[i] = RB_ROOT;
                INIT_LIST_HEAD(&self->removed_maps[i]);
        }
}

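/*
 * Allocate a thread, initialise its map groups and give it a
 * placeholder comm of ":<pid>" until a real comm is set.
 */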
static struct thread *thread__new(pid_t pid)
{
        struct thread *self = zalloc(sizeof(*self));

        if (self != NULL) {
                map_groups__init(&self->mg);
                self->pid = pid;
                self->comm = malloc(32);
                if (self->comm)
                        snprintf(self->comm, 32, ":%d", self->pid);
        }

        return self;
}

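/*
 * Empty all the map rbtrees, moving the maps to the removed_maps lists
 * instead of freeing them (see the comment below about outstanding
 * references).
 */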
static void map_groups__flush(struct map_groups *self)
{
        int type;

        for (type = 0; type < MAP__NR_TYPES; type++) {
                struct rb_root *root = &self->maps[type];
                struct rb_node *next = rb_first(root);

                while (next) {
                        struct map *pos = rb_entry(next, struct map, rb_node);
                        next = rb_next(&pos->rb_node);
                        rb_erase(&pos->rb_node, root);
                        /*
                         * We may have references to this map, for
                         * instance in some hist_entry instances, so
                         * just move them to a separate list.
                         */
                        list_add_tail(&pos->node, &self->removed_maps[pos->type]);
                }
        }
}

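/*
 * Replace the thread's comm and flush its map groups, since a new comm
 * means the previously recorded mappings may no longer apply.
 */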
int thread__set_comm(struct thread *self, const char *comm)
{
        int err;

        if (self->comm)
                free(self->comm);
        self->comm = strdup(comm);
        err = self->comm == NULL ? -ENOMEM : 0;
        if (!err) {
                self->comm_set = true;
                map_groups__flush(&self->mg);
        }
        return err;
}

int thread__comm_len(struct thread *self)
{
        if (!self->comm_len) {
                if (!self->comm)
                        return 0;
                self->comm_len = strlen(self->comm);
        }

        return self->comm_len;
}

size_t __map_groups__fprintf_maps(struct map_groups *self,
                                  enum map_type type, FILE *fp)
{
        size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
        struct rb_node *nd;

        for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 2) {
                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
                }
        }

        return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
{
        size_t printed = 0, i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_maps(self, i, fp);
        return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
                                                 enum map_type type, FILE *fp)
{
        struct map *pos;
        size_t printed = 0;

        list_for_each_entry(pos, &self->removed_maps[type], node) {
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 1) {
                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
                }
        }
        return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
{
        size_t printed = 0, i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_removed_maps(self, i, fp);
        return printed;
}

static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
{
        size_t printed = map_groups__fprintf_maps(self, fp);
        printed += fprintf(fp, "Removed maps:\n");
        return printed + map_groups__fprintf_removed_maps(self, fp);
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
        return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
               map_groups__fprintf(&self->mg, fp);
}

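/*
 * Look up the thread for @pid in the session's rbtree, creating and
 * inserting a new one if it is not there yet.
 */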
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
        struct rb_node **p = &self->threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - PID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        if (self->last_match && self->last_match->pid == pid)
                return self->last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        self->last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        th = thread__new(pid);
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &self->threads);
                self->last_match = th;
        }

        return th;
}

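/*
 * Remove or split any existing maps that overlap the range covered by
 * @map, so the new map can be inserted without conflicts.
 */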
static int map_groups__fixup_overlappings(struct map_groups *self,
                                          struct map *map)
{
        struct rb_root *root = &self->maps[map->type];
        struct rb_node *next = rb_first(root);

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);

                if (!map__overlap(pos, map))
                        continue;

                if (verbose >= 2) {
                        fputs("overlapping maps:\n", stderr);
                        map__fprintf(map, stderr);
                        map__fprintf(pos, stderr);
                }

                rb_erase(&pos->rb_node, root);
                /*
                 * We may have references to this map, for instance in some
                 * hist_entry instances, so just move them to a separate
                 * list.
                 */
                list_add_tail(&pos->node, &self->removed_maps[map->type]);
                /*
                 * Now check if we need to create new maps for areas not
                 * overlapped by the new map:
                 */
                if (map->start > pos->start) {
                        struct map *before = map__clone(pos);

                        if (before == NULL)
                                return -ENOMEM;

                        before->end = map->start - 1;
                        map_groups__insert(self, before);
                        if (verbose >= 2)
                                map__fprintf(before, stderr);
                }

                if (map->end < pos->end) {
                        struct map *after = map__clone(pos);

                        if (after == NULL)
                                return -ENOMEM;

                        after->start = map->end + 1;
                        map_groups__insert(self, after);
                        if (verbose >= 2)
                                map__fprintf(after, stderr);
                }
        }

        return 0;
}

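/*
 * Insert @map into the thread's map groups, fixing up any overlaps
 * with maps that are already there.
 */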
void thread__insert_map(struct thread *self, struct map *map)
{
        map_groups__fixup_overlappings(&self->mg, map);
        map_groups__insert(&self->mg, map);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
static int map_groups__clone(struct map_groups *self,
                             struct map_groups *parent, enum map_type type)
{
        struct rb_node *nd;
        for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
                struct map *map = rb_entry(nd, struct map, rb_node);
                struct map *new = map__clone(map);
                if (new == NULL)
                        return -ENOMEM;
                map_groups__insert(self, new);
        }
        return 0;
}

int thread__fork(struct thread *self, struct thread *parent)
{
        int i;

        if (parent->comm_set) {
                if (self->comm)
                        free(self->comm);
                self->comm = strdup(parent->comm);
                if (!self->comm)
                        return -ENOMEM;
                self->comm_set = true;
        }

        for (i = 0; i < MAP__NR_TYPES; ++i)
                if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
                        return -ENOMEM;
        return 0;
}

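/*
 * Print every thread in the session together with its maps.
 */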
size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
{
        size_t ret = 0;
        struct rb_node *nd;

        for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
                struct thread *pos = rb_entry(nd, struct thread, rb_node);

                ret += thread__fprintf(pos, fp);
        }

        return ret;
}