/* tools/perf/util/thread.c */
#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"

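/*
 * Initialize the per-type map rbtrees and the lists that collect maps
 * removed because of overlaps or a flush.
 */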
void map_groups__init(struct map_groups *self)
{
        int i;
        for (i = 0; i < MAP__NR_TYPES; ++i) {
                self->maps[i] = RB_ROOT;
                INIT_LIST_HEAD(&self->removed_maps[i]);
        }
}

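/*
 * Allocate a thread for @pid.  Until thread__set_comm() renames it, the
 * comm is a placeholder of the form ":<pid>".
 */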
static struct thread *thread__new(pid_t pid)
{
        struct thread *self = zalloc(sizeof(*self));

        if (self != NULL) {
                map_groups__init(&self->mg);
                self->pid = pid;
                self->comm = malloc(32);
                if (self->comm)
                        snprintf(self->comm, 32, ":%d", self->pid);
        }

        return self;
}

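/*
 * Move every map in the per-type rbtrees to the matching removed_maps
 * list.  Called from thread__set_comm(), where a new comm is taken to
 * mean the old mappings may no longer apply.
 */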
static void map_groups__flush(struct map_groups *self)
{
        int type;

        for (type = 0; type < MAP__NR_TYPES; type++) {
                struct rb_root *root = &self->maps[type];
                struct rb_node *next = rb_first(root);

                while (next) {
                        struct map *pos = rb_entry(next, struct map, rb_node);
                        next = rb_next(&pos->rb_node);
                        rb_erase(&pos->rb_node, root);
                        /*
                         * We may have references to this map, for
                         * instance in some hist_entry instances, so
                         * just move them to a separate list.
                         */
                        list_add_tail(&pos->node, &self->removed_maps[pos->type]);
                }
        }
}

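/*
 * Replace the thread's comm with a copy of @comm and flush its maps.
 * Returns 0 on success, -ENOMEM if the copy fails.
 */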
int thread__set_comm(struct thread *self, const char *comm)
{
        int err;

        if (self->comm)
                free(self->comm);
        self->comm = strdup(comm);
        err = self->comm == NULL ? -ENOMEM : 0;
        if (!err) {
                self->comm_set = true;
                map_groups__flush(&self->mg);
        }
        return err;
}

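/* Cache and return strlen(comm); 0 if the thread has no comm yet. */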
int thread__comm_len(struct thread *self)
{
        if (!self->comm_len) {
                if (!self->comm)
                        return 0;
                self->comm_len = strlen(self->comm);
        }

        return self->comm_len;
}

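/*
 * Debug output helpers: dump the maps (and their DSOs when verbose > 1)
 * for one map type, for all types, and for the removed_maps lists.
 */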
static size_t __map_groups__fprintf_maps(struct map_groups *self,
                                         enum map_type type, FILE *fp)
{
        size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
        struct rb_node *nd;

        for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 1) {
                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
                }
        }

        return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
{
        size_t printed = 0, i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_maps(self, i, fp);
        return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
                                                 enum map_type type, FILE *fp)
{
        struct map *pos;
        size_t printed = 0;

        list_for_each_entry(pos, &self->removed_maps[type], node) {
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 1) {
                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
                }
        }
        return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
{
        size_t printed = 0, i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_removed_maps(self, i, fp);
        return printed;
}

static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
{
        size_t printed = map_groups__fprintf_maps(self, fp);
        printed += fprintf(fp, "Removed maps:\n");
        return printed + map_groups__fprintf_removed_maps(self, fp);
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
        return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
               map_groups__fprintf(&self->mg, fp);
}

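/*
 * Find the thread with @pid in the session's rbtree, creating and
 * inserting it if it does not exist yet.  A one-entry cache
 * (last_match) short-circuits the common case of repeated lookups for
 * the same PID.  Returns NULL only if thread__new() fails.
 *
 * Typical use from event processing (illustrative sketch):
 *
 *	struct thread *thread = perf_session__findnew(session, pid);
 */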
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
        struct rb_node **p = &self->threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - PID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        if (self->last_match && self->last_match->pid == pid)
                return self->last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        self->last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        th = thread__new(pid);
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &self->threads);
                self->last_match = th;
        }

        return th;
}

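/*
 * Resolve overlaps between @map and the maps already in the group:
 * overlapped maps are parked on removed_maps (they may still be
 * referenced by hist_entry instances), and clones are inserted for any
 * leading/trailing ranges that the new map does not cover.
 */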
static int map_groups__fixup_overlappings(struct map_groups *self,
                                          struct map *map)
{
        struct rb_root *root = &self->maps[map->type];
        struct rb_node *next = rb_first(root);

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);

                if (!map__overlap(pos, map))
                        continue;

                if (verbose >= 2) {
                        fputs("overlapping maps:\n", stderr);
                        map__fprintf(map, stderr);
                        map__fprintf(pos, stderr);
                }

                rb_erase(&pos->rb_node, root);
                /*
                 * We may have references to this map, for instance in some
                 * hist_entry instances, so just move them to a separate
                 * list.
                 */
                list_add_tail(&pos->node, &self->removed_maps[map->type]);
                /*
                 * Now check if we need to create new maps for areas not
                 * overlapped by the new map:
                 */
                if (map->start > pos->start) {
                        struct map *before = map__clone(pos);

                        if (before == NULL)
                                return -ENOMEM;

                        before->end = map->start - 1;
                        map_groups__insert(self, before);
                        if (verbose >= 2)
                                map__fprintf(before, stderr);
                }

                if (map->end < pos->end) {
                        struct map *after = map__clone(pos);

                        if (after == NULL)
                                return -ENOMEM;

                        after->start = map->end + 1;
                        map_groups__insert(self, after);
                        if (verbose >= 2)
                                map__fprintf(after, stderr);
                }
        }

        return 0;
}

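/* Insert @map into @maps, keyed by its start address. */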
void maps__insert(struct rb_root *maps, struct map *map)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        const u64 ip = map->start;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&map->rb_node, parent, p);
        rb_insert_color(&map->rb_node, maps);
}

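/*
 * Binary-search @maps for the map whose [start, end] range contains
 * @ip; returns NULL if no map covers it.  For example (illustrative):
 *
 *	struct map *m = maps__find(&mg->maps[MAP__FUNCTION], ip);
 */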
struct map *maps__find(struct rb_root *maps, u64 ip)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else if (ip > m->end)
                        p = &(*p)->rb_right;
                else
                        return m;
        }

        return NULL;
}

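/*
 * Add @map to the thread's map groups, first fixing up any overlap
 * with existing maps.
 */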
void thread__insert_map(struct thread *self, struct map *map)
{
        map_groups__fixup_overlappings(&self->mg, map);
        map_groups__insert(&self->mg, map);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
static int map_groups__clone(struct map_groups *self,
                             struct map_groups *parent, enum map_type type)
{
        struct rb_node *nd;
        for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
                struct map *map = rb_entry(nd, struct map, rb_node);
                struct map *new = map__clone(map);
                if (new == NULL)
                        return -ENOMEM;
                map_groups__insert(self, new);
        }
        return 0;
}

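/*
 * Set up @self as a child of @parent: inherit the parent's comm (if it
 * was ever set) and clone all of its maps.  Returns -ENOMEM on failure.
 */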
int thread__fork(struct thread *self, struct thread *parent)
{
        int i;

        if (parent->comm_set) {
                if (self->comm)
                        free(self->comm);
                self->comm = strdup(parent->comm);
                if (!self->comm)
                        return -ENOMEM;
                self->comm_set = true;
        }

        for (i = 0; i < MAP__NR_TYPES; ++i)
                if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
                        return -ENOMEM;
        return 0;
}

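/* Dump every thread in the session, with its maps, to @fp. */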
size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
{
        size_t ret = 0;
        struct rb_node *nd;

        for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
                struct thread *pos = rb_entry(nd, struct thread, rb_node);

                ret += thread__fprintf(pos, fp);
        }

        return ret;
}

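/*
 * Resolve @addr to a symbol: find the map of the given @type that
 * covers it, translate the address into the map's DSO space with
 * map->map_ip(), and look the symbol up there.
 */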
struct symbol *map_groups__find_symbol(struct map_groups *self,
                                       enum map_type type, u64 addr,
                                       symbol_filter_t filter)
{
        struct map *map = map_groups__find(self, type, addr);

        if (map != NULL)
                return map__find_symbol(map, map->map_ip(map, addr), filter);

        return NULL;
}