Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
[safe/jmp/linux-2.6] / kernel / cgroup_freezer.c
1 /*
2  * cgroup_freezer.c -  control group freezer subsystem
3  *
4  * Copyright IBM Corporation, 2007
5  *
6  * Author : Cedric Le Goater <clg@fr.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of version 2.1 of the GNU Lesser General Public License
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it would be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15  */
16
17 #include <linux/module.h>
18 #include <linux/cgroup.h>
19 #include <linux/fs.h>
20 #include <linux/uaccess.h>
21 #include <linux/freezer.h>
22 #include <linux/seq_file.h>
23
24 enum freezer_state {
25         CGROUP_THAWED = 0,
26         CGROUP_FREEZING,
27         CGROUP_FROZEN,
28 };
29
30 struct freezer {
31         struct cgroup_subsys_state css;
32         enum freezer_state state;
33         spinlock_t lock; /* protects _writes_ to state */
34 };
35
36 static inline struct freezer *cgroup_freezer(
37                 struct cgroup *cgroup)
38 {
39         return container_of(
40                 cgroup_subsys_state(cgroup, freezer_subsys_id),
41                 struct freezer, css);
42 }
43
44 static inline struct freezer *task_freezer(struct task_struct *task)
45 {
46         return container_of(task_subsys_state(task, freezer_subsys_id),
47                             struct freezer, css);
48 }
49
50 int cgroup_freezing_or_frozen(struct task_struct *task)
51 {
52         struct freezer *freezer;
53         enum freezer_state state;
54
55         task_lock(task);
56         freezer = task_freezer(task);
57         if (!freezer->css.cgroup->parent)
58                 state = CGROUP_THAWED; /* root cgroup can't be frozen */
59         else
60                 state = freezer->state;
61         task_unlock(task);
62
63         return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
64 }
65
/*
 * Userspace-visible names for freezer.state, indexed by enum freezer_state
 * (the order here must match the enum).
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE.
 */
static const char *freezer_state_strs[] = {
        "THAWED",
        "FREEZING",
        "FROZEN",
};
75
76 /*
77  * State diagram
78  * Transitions are caused by userspace writes to the freezer.state file.
 79  * The values in parentheses are state labels. The rest are edge labels.
80  *
81  * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
82  *    ^ ^                    |                     |
83  *    | \_______THAWED_______/                     |
84  *    \__________________________THAWED____________/
85  */
86
87 struct cgroup_subsys freezer_subsys;
88
89 /* Locks taken and their ordering
90  * ------------------------------
91  * css_set_lock
92  * cgroup_mutex (AKA cgroup_lock)
93  * task->alloc_lock (AKA task_lock)
94  * freezer->lock
95  * task->sighand->siglock
96  *
97  * cgroup code forces css_set_lock to be taken before task->alloc_lock
98  *
99  * freezer_create(), freezer_destroy():
100  * cgroup_mutex [ by cgroup core ]
101  *
102  * can_attach():
103  * cgroup_mutex
104  *
105  * cgroup_frozen():
106  * task->alloc_lock (to get task's cgroup)
107  *
108  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
109  * task->alloc_lock (to get task's cgroup)
110  * freezer->lock
111  *  sighand->siglock (if the cgroup is freezing)
112  *
113  * freezer_read():
114  * cgroup_mutex
115  *  freezer->lock
116  *   read_lock css_set_lock (cgroup iterator start)
117  *
118  * freezer_write() (freeze):
119  * cgroup_mutex
120  *  freezer->lock
121  *   read_lock css_set_lock (cgroup iterator start)
122  *    sighand->siglock
123  *
124  * freezer_write() (unfreeze):
125  * cgroup_mutex
126  *  freezer->lock
127  *   read_lock css_set_lock (cgroup iterator start)
128  *    task->alloc_lock (to prevent races with freeze_task())
129  *     sighand->siglock
130  */
131 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
132                                                   struct cgroup *cgroup)
133 {
134         struct freezer *freezer;
135
136         freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
137         if (!freezer)
138                 return ERR_PTR(-ENOMEM);
139
140         spin_lock_init(&freezer->lock);
141         freezer->state = CGROUP_THAWED;
142         return &freezer->css;
143 }
144
/* Release the freezer state allocated in freezer_create(). */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	kfree(freezer);
}
150
151 /* Task is frozen or will freeze immediately when next it gets woken */
152 static bool is_task_frozen_enough(struct task_struct *task)
153 {
154         return frozen(task) ||
155                 (task_is_stopped_or_traced(task) && freezing(task));
156 }
157
158 /*
159  * The call to cgroup_lock() in the freezer.state write method prevents
160  * a write to that file racing against an attach, and hence the
161  * can_attach() result will remain valid until the attach completes.
162  */
163 static int freezer_can_attach(struct cgroup_subsys *ss,
164                               struct cgroup *new_cgroup,
165                               struct task_struct *task, bool threadgroup)
166 {
167         struct freezer *freezer;
168
169         /*
170          * Anything frozen can't move or be moved to/from.
171          *
172          * Since orig_freezer->state == FROZEN means that @task has been
173          * frozen, so it's sufficient to check the latter condition.
174          */
175
176         if (is_task_frozen_enough(task))
177                 return -EBUSY;
178
179         freezer = cgroup_freezer(new_cgroup);
180         if (freezer->state == CGROUP_FROZEN)
181                 return -EBUSY;
182
183         if (threadgroup) {
184                 struct task_struct *c;
185
186                 rcu_read_lock();
187                 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
188                         if (is_task_frozen_enough(c)) {
189                                 rcu_read_unlock();
190                                 return -EBUSY;
191                         }
192                 }
193                 rcu_read_unlock();
194         }
195
196         return 0;
197 }
198
/* Fork callback: make a new child start freezing if its cgroup is FREEZING. */
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
        struct freezer *freezer;

        /*
         * No lock is needed, since the task isn't on tasklist yet,
         * so it can't be moved to another cgroup, which means the
         * freezer won't be removed and will be valid during this
         * function call.
         */
        freezer = task_freezer(task);

        /*
         * The root cgroup is non-freezable, so we can skip the
         * following check.
         */
        if (!freezer->css.cgroup->parent)
                return;

        spin_lock_irq(&freezer->lock);
        /* A FROZEN cgroup admits no new tasks, so forking into one is a bug. */
        BUG_ON(freezer->state == CGROUP_FROZEN);

        /* Locking avoids race with FREEZING -> THAWED transitions. */
        if (freezer->state == CGROUP_FREEZING)
                freeze_task(task, true);
        spin_unlock_irq(&freezer->lock);
}
226
227 /*
228  * caller must hold freezer->lock
229  */
230 static void update_freezer_state(struct cgroup *cgroup,
231                                  struct freezer *freezer)
232 {
233         struct cgroup_iter it;
234         struct task_struct *task;
235         unsigned int nfrozen = 0, ntotal = 0;
236
237         cgroup_iter_start(cgroup, &it);
238         while ((task = cgroup_iter_next(cgroup, &it))) {
239                 ntotal++;
240                 if (is_task_frozen_enough(task))
241                         nfrozen++;
242         }
243
244         /*
245          * Transition to FROZEN when no new tasks can be added ensures
246          * that we never exist in the FROZEN state while there are unfrozen
247          * tasks.
248          */
249         if (nfrozen == ntotal)
250                 freezer->state = CGROUP_FROZEN;
251         else if (nfrozen > 0)
252                 freezer->state = CGROUP_FREEZING;
253         else
254                 freezer->state = CGROUP_THAWED;
255         cgroup_iter_end(cgroup, &it);
256 }
257
258 static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
259                         struct seq_file *m)
260 {
261         struct freezer *freezer;
262         enum freezer_state state;
263
264         if (!cgroup_lock_live_group(cgroup))
265                 return -ENODEV;
266
267         freezer = cgroup_freezer(cgroup);
268         spin_lock_irq(&freezer->lock);
269         state = freezer->state;
270         if (state == CGROUP_FREEZING) {
271                 /* We change from FREEZING to FROZEN lazily if the cgroup was
272                  * only partially frozen when we exitted write. */
273                 update_freezer_state(cgroup, freezer);
274                 state = freezer->state;
275         }
276         spin_unlock_irq(&freezer->lock);
277         cgroup_unlock();
278
279         seq_puts(m, freezer_state_strs[state]);
280         seq_putc(m, '\n');
281         return 0;
282 }
283
284 static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
285 {
286         struct cgroup_iter it;
287         struct task_struct *task;
288         unsigned int num_cant_freeze_now = 0;
289
290         freezer->state = CGROUP_FREEZING;
291         cgroup_iter_start(cgroup, &it);
292         while ((task = cgroup_iter_next(cgroup, &it))) {
293                 if (!freeze_task(task, true))
294                         continue;
295                 if (is_task_frozen_enough(task))
296                         continue;
297                 if (!freezing(task) && !freezer_should_skip(task))
298                         num_cant_freeze_now++;
299         }
300         cgroup_iter_end(cgroup, &it);
301
302         return num_cant_freeze_now ? -EBUSY : 0;
303 }
304
305 static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
306 {
307         struct cgroup_iter it;
308         struct task_struct *task;
309
310         cgroup_iter_start(cgroup, &it);
311         while ((task = cgroup_iter_next(cgroup, &it))) {
312                 thaw_process(task);
313         }
314         cgroup_iter_end(cgroup, &it);
315
316         freezer->state = CGROUP_THAWED;
317 }
318
319 static int freezer_change_state(struct cgroup *cgroup,
320                                 enum freezer_state goal_state)
321 {
322         struct freezer *freezer;
323         int retval = 0;
324
325         freezer = cgroup_freezer(cgroup);
326
327         spin_lock_irq(&freezer->lock);
328
329         update_freezer_state(cgroup, freezer);
330         if (goal_state == freezer->state)
331                 goto out;
332
333         switch (goal_state) {
334         case CGROUP_THAWED:
335                 unfreeze_cgroup(cgroup, freezer);
336                 break;
337         case CGROUP_FROZEN:
338                 retval = try_to_freeze_cgroup(cgroup, freezer);
339                 break;
340         default:
341                 BUG();
342         }
343 out:
344         spin_unlock_irq(&freezer->lock);
345
346         return retval;
347 }
348
349 static int freezer_write(struct cgroup *cgroup,
350                          struct cftype *cft,
351                          const char *buffer)
352 {
353         int retval;
354         enum freezer_state goal_state;
355
356         if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
357                 goal_state = CGROUP_THAWED;
358         else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
359                 goal_state = CGROUP_FROZEN;
360         else
361                 return -EINVAL;
362
363         if (!cgroup_lock_live_group(cgroup))
364                 return -ENODEV;
365         retval = freezer_change_state(cgroup, goal_state);
366         cgroup_unlock();
367         return retval;
368 }
369
/* Control files created in each non-root freezer cgroup directory. */
static struct cftype files[] = {
        {
                .name = "state",
                .read_seq_string = freezer_read,
                .write_string = freezer_write,
        },
};
377
378 static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
379 {
380         if (!cgroup->parent)
381                 return 0;
382         return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
383 }
384
/* Subsystem registration; attach/exit callbacks are intentionally unused. */
struct cgroup_subsys freezer_subsys = {
        .name           = "freezer",
        .create         = freezer_create,
        .destroy        = freezer_destroy,
        .populate       = freezer_populate,
        .subsys_id      = freezer_subsys_id,
        .can_attach     = freezer_can_attach,
        .attach         = NULL,
        .fork           = freezer_fork,
        .exit           = NULL,
};