perf tools: Introduce zalloc() for the common calloc(1, N) case
tools/perf/util/thread.c (branch safe/jmp/linux-2.6)
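The zalloc() helper named in the commit title is, per that summary, a wrapper for the common calloc(1, N) pattern. A minimal sketch of it, assuming it is defined as a static inline in the already-included util.h, would be:

static inline void *zalloc(size_t size)
{
        return calloc(1, size);
}

thread__new() below relies on this to get a zero-initialized struct thread, so fields it does not set explicitly, such as comm_len and the embedded rb_node, start out cleared without a separate memset().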
#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "thread.h"
#include "util.h"
#include "debug.h"

static struct rb_root threads;
static struct thread *last_match;

static struct thread *thread__new(pid_t pid)
{
        struct thread *self = zalloc(sizeof(*self));

        if (self != NULL) {
                self->pid = pid;
                self->comm = malloc(32);
                if (self->comm)
                        snprintf(self->comm, 32, ":%d", self->pid);
                self->maps = RB_ROOT;
                INIT_LIST_HEAD(&self->removed_maps);
        }

        return self;
}

int thread__set_comm(struct thread *self, const char *comm)
{
        if (self->comm)
                free(self->comm);
        self->comm = strdup(comm);
        return self->comm ? 0 : -ENOMEM;
}

int thread__comm_len(struct thread *self)
{
        if (!self->comm_len) {
                if (!self->comm)
                        return 0;
                self->comm_len = strlen(self->comm);
        }

        return self->comm_len;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
        struct rb_node *nd;
        struct map *pos;
        size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
                             self->pid, self->comm);

        for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct map, rb_node);
                ret += map__fprintf(pos, fp);
        }

        ret += fprintf(fp, "Removed maps:\n");

        list_for_each_entry(pos, &self->removed_maps, node)
                ret += map__fprintf(pos, fp);

        return ret;
}

struct thread *threads__findnew(pid_t pid)
{
        struct rb_node **p = &threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - PID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        if (last_match && last_match->pid == pid)
                return last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        th = thread__new(pid);
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &threads);
                last_match = th;
        }

        return th;
}

struct thread *register_idle_thread(void)
{
        struct thread *thread = threads__findnew(0);

        if (!thread || thread__set_comm(thread, "swapper")) {
                fprintf(stderr, "problem inserting idle task.\n");
                exit(-1);
        }

        return thread;
}

static void thread__remove_overlappings(struct thread *self, struct map *map)
{
        struct rb_node *next = rb_first(&self->maps);

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);

                if (!map__overlap(pos, map))
                        continue;

                if (verbose >= 2) {
                        fputs("overlapping maps:\n", stderr);
                        map__fprintf(map, stderr);
                        map__fprintf(pos, stderr);
                }

                rb_erase(&pos->rb_node, &self->maps);
                /*
                 * We may have references to this map, for instance in some
                 * hist_entry instances, so just move them to a separate
                 * list.
                 */
                list_add_tail(&pos->node, &self->removed_maps);
        }
}

void maps__insert(struct rb_root *maps, struct map *map)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        const u64 ip = map->start;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&map->rb_node, parent, p);
        rb_insert_color(&map->rb_node, maps);
}

struct map *maps__find(struct rb_root *maps, u64 ip)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else if (ip > m->end)
                        p = &(*p)->rb_right;
                else
                        return m;
        }

        return NULL;
}

void thread__insert_map(struct thread *self, struct map *map)
{
        thread__remove_overlappings(self, map);
        maps__insert(&self->maps, map);
}

int thread__fork(struct thread *self, struct thread *parent)
{
        struct rb_node *nd;

        if (self->comm)
                free(self->comm);
        self->comm = strdup(parent->comm);
        if (!self->comm)
                return -ENOMEM;

        for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
                struct map *map = rb_entry(nd, struct map, rb_node);
                struct map *new = map__clone(map);
                if (!new)
                        return -ENOMEM;
                thread__insert_map(self, new);
        }

        return 0;
}

size_t threads__fprintf(FILE *fp)
{
        size_t ret = 0;
        struct rb_node *nd;

        for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
                struct thread *pos = rb_entry(nd, struct thread, rb_node);

                ret += thread__fprintf(pos, fp);
        }

        return ret;
}
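For context, here is a hedged sketch of how an event handler elsewhere in perf might drive this API; the handler name and its parameters are illustrative, only threads__findnew() and thread__set_comm() come from this file:

/* Illustrative caller, not part of thread.c */
static int handle_comm_event(pid_t pid, const char *comm)
{
        struct thread *thread = threads__findnew(pid);

        if (thread == NULL || thread__set_comm(thread, comm) != 0)
                return -1;      /* lookup/insertion or strdup() failed */

        return 0;
}

The front-end cache in threads__findnew() keeps such per-event lookups cheap, since consecutive events usually belong to the same PID.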