/*
 * cgroup_freezer.c -  control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>

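/*
 * Per-cgroup freezer states: THAWED (all tasks are running), FREEZING
 * (a freeze has been requested but not every task is frozen yet) and
 * FROZEN (every task in the cgroup is frozen).
 */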
enum freezer_state {
        CGROUP_THAWED = 0,
        CGROUP_FREEZING,
        CGROUP_FROZEN,
};

struct freezer {
        struct cgroup_subsys_state css;
        enum freezer_state state;
        spinlock_t lock; /* protects _writes_ to state */
};

static inline struct freezer *cgroup_freezer(
                struct cgroup *cgroup)
{
        return container_of(
                cgroup_subsys_state(cgroup, freezer_subsys_id),
                struct freezer, css);
}

static inline struct freezer *task_freezer(struct task_struct *task)
{
        return container_of(task_subsys_state(task, freezer_subsys_id),
                            struct freezer, css);
}

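/*
 * Report whether @task currently belongs to a completely frozen cgroup.
 * task_lock() keeps the task from being moved to another cgroup while
 * the state is read (see the lock ordering comment below).
 */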
int cgroup_frozen(struct task_struct *task)
{
        struct freezer *freezer;
        enum freezer_state state;

        task_lock(task);
        freezer = task_freezer(task);
        state = freezer->state;
        task_unlock(task);

        return state == CGROUP_FROZEN;
}

/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
static const char *freezer_state_strs[] = {
        "THAWED",
        "FREEZING",
        "FROZEN",
};

/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parentheses are state labels. The rest are edge labels.
 *
 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \_______THAWED_______/                     |
 *    \__________________________THAWED____________/
 */

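/*
 * For example, the following userspace sequence walks the diagram above
 * from THAWED to FROZEN and back again (the mount point and group name
 * are illustrative):
 *
 *   # mount -t cgroup -o freezer freezer /cgroup
 *   # mkdir /cgroup/frozen_group
 *   # echo $some_pid > /cgroup/frozen_group/tasks
 *   # echo FROZEN > /cgroup/frozen_group/freezer.state
 *   # cat /cgroup/frozen_group/freezer.state
 *   # echo THAWED > /cgroup/frozen_group/freezer.state
 */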
struct cgroup_subsys freezer_subsys;

/* Locks taken and their ordering
 * ------------------------------
 * css_set_lock
 * cgroup_mutex (AKA cgroup_lock)
 * task->alloc_lock (AKA task_lock)
 * freezer->lock
 * task->sighand->siglock
 *
 * cgroup code forces css_set_lock to be taken before task->alloc_lock
 *
 * freezer_create(), freezer_destroy():
 * cgroup_mutex [ by cgroup core ]
 *
 * can_attach():
 * cgroup_mutex
 *
 * cgroup_frozen():
 * task->alloc_lock (to get task's cgroup)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * task->alloc_lock (to get task's cgroup)
 * freezer->lock
 *  sighand->siglock (if the cgroup is freezing)
 *
 * freezer_read():
 * cgroup_mutex
 *  freezer->lock
 *   read_lock css_set_lock (cgroup iterator start)
 *
 * freezer_write() (freeze):
 * cgroup_mutex
 *  freezer->lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    sighand->siglock
 *
 * freezer_write() (unfreeze):
 * cgroup_mutex
 *  freezer->lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock (to prevent races with freeze_task())
 *     sighand->siglock
 */
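/* Allocate a freezer for a new cgroup; it starts life THAWED. */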
static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
                                                  struct cgroup *cgroup)
{
        struct freezer *freezer;

        freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
        if (!freezer)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&freezer->lock);
        freezer->state = CGROUP_THAWED;
        return &freezer->css;
}

static void freezer_destroy(struct cgroup_subsys *ss,
                            struct cgroup *cgroup)
{
        kfree(cgroup_freezer(cgroup));
}

/* Task is frozen or will freeze immediately when next it gets woken */
static bool is_task_frozen_enough(struct task_struct *task)
{
        return frozen(task) ||
                (task_is_stopped_or_traced(task) && freezing(task));
}

/*
 * The call to cgroup_lock() in the freezer.state write method prevents
 * a write to that file racing against an attach, and hence the
 * can_attach() result will remain valid until the attach completes.
 */
static int freezer_can_attach(struct cgroup_subsys *ss,
                              struct cgroup *new_cgroup,
                              struct task_struct *task, bool threadgroup)
{
        struct freezer *freezer;

        /*
         * Anything frozen can't move or be moved to/from.
         *
         * orig_freezer->state == FROZEN implies that @task has been
         * frozen, so it is sufficient to check @task itself rather
         * than the state of the source cgroup.
         */

        if (is_task_frozen_enough(task))
                return -EBUSY;

        freezer = cgroup_freezer(new_cgroup);
        if (freezer->state == CGROUP_FROZEN)
                return -EBUSY;

        if (threadgroup) {
                struct task_struct *c;

                rcu_read_lock();
                list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
                        if (is_task_frozen_enough(c)) {
                                rcu_read_unlock();
                                return -EBUSY;
                        }
                }
                rcu_read_unlock();
        }

        return 0;
}

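/*
 * Called for every fork.  If the child's cgroup is already FREEZING,
 * the child must be made to freeze as well; cgroup_mutex cannot be
 * taken here (see the lock ordering comment above).
 */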
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
        struct freezer *freezer;

        /*
         * No lock is needed, since the task isn't on the tasklist yet,
         * so it can't be moved to another cgroup, which means the
         * freezer won't be removed and will be valid during this
         * function call.
         */
        freezer = task_freezer(task);

        /*
         * The root cgroup is non-freezable, so we can skip the
         * following check.
         */
        if (!freezer->css.cgroup->parent)
                return;

        spin_lock_irq(&freezer->lock);
        BUG_ON(freezer->state == CGROUP_FROZEN);

        /* Locking avoids race with FREEZING -> THAWED transitions. */
        if (freezer->state == CGROUP_FREEZING)
                freeze_task(task, true);
        spin_unlock_irq(&freezer->lock);
}

/*
 * caller must hold freezer->lock
 */
static void update_freezer_state(struct cgroup *cgroup,
                                 struct freezer *freezer)
{
        struct cgroup_iter it;
        struct task_struct *task;
        unsigned int nfrozen = 0, ntotal = 0;

        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                ntotal++;
                if (is_task_frozen_enough(task))
                        nfrozen++;
        }

        /*
         * Transitioning to FROZEN when no new tasks can be added ensures
         * that we never exist in the FROZEN state while there are unfrozen
         * tasks.
         */
        if (nfrozen == ntotal)
                freezer->state = CGROUP_FROZEN;
        else if (nfrozen > 0)
                freezer->state = CGROUP_FREEZING;
        else
                freezer->state = CGROUP_THAWED;
        cgroup_iter_end(cgroup, &it);
}

static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
                        struct seq_file *m)
{
        struct freezer *freezer;
        enum freezer_state state;

        if (!cgroup_lock_live_group(cgroup))
                return -ENODEV;

        freezer = cgroup_freezer(cgroup);
        spin_lock_irq(&freezer->lock);
        state = freezer->state;
        if (state == CGROUP_FREEZING) {
                /* We change from FREEZING to FROZEN lazily if the cgroup was
                 * only partially frozen when we exited write. */
                update_freezer_state(cgroup, freezer);
                state = freezer->state;
        }
        spin_unlock_irq(&freezer->lock);
        cgroup_unlock();

        seq_puts(m, freezer_state_strs[state]);
        seq_putc(m, '\n');
        return 0;
}

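/*
 * Mark the cgroup FREEZING and request that each member task freeze.
 * Returns -EBUSY if some tasks could not be frozen immediately.
 */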
static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
        struct cgroup_iter it;
        struct task_struct *task;
        unsigned int num_cant_freeze_now = 0;

        freezer->state = CGROUP_FREEZING;
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                if (!freeze_task(task, true))
                        continue;
                if (is_task_frozen_enough(task))
                        continue;
                if (!freezing(task) && !freezer_should_skip(task))
                        num_cant_freeze_now++;
        }
        cgroup_iter_end(cgroup, &it);

        return num_cant_freeze_now ? -EBUSY : 0;
}

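/* Thaw every task in the cgroup and return it to THAWED. */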
static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
        struct cgroup_iter it;
        struct task_struct *task;

        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                thaw_process(task);
        }
        cgroup_iter_end(cgroup, &it);

        freezer->state = CGROUP_THAWED;
}

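/*
 * Move the cgroup towards @goal_state.  Called from freezer_write()
 * with cgroup_mutex held; takes freezer->lock around the transition.
 */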
static int freezer_change_state(struct cgroup *cgroup,
                                enum freezer_state goal_state)
{
        struct freezer *freezer;
        int retval = 0;

        freezer = cgroup_freezer(cgroup);

        spin_lock_irq(&freezer->lock);

        update_freezer_state(cgroup, freezer);
        if (goal_state == freezer->state)
                goto out;

        switch (goal_state) {
        case CGROUP_THAWED:
                unfreeze_cgroup(cgroup, freezer);
                break;
        case CGROUP_FROZEN:
                retval = try_to_freeze_cgroup(cgroup, freezer);
                break;
        default:
                BUG();
        }
out:
        spin_unlock_irq(&freezer->lock);

        return retval;
}

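/*
 * Handler for writes to freezer.state.  Only "THAWED" and "FROZEN" are
 * accepted; "FREEZING" is an internal state and writing it returns
 * -EINVAL.
 */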
static int freezer_write(struct cgroup *cgroup,
                         struct cftype *cft,
                         const char *buffer)
{
        int retval;
        enum freezer_state goal_state;

        if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
                goal_state = CGROUP_THAWED;
        else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
                goal_state = CGROUP_FROZEN;
        else
                return -EINVAL;

        if (!cgroup_lock_live_group(cgroup))
                return -ENODEV;
        retval = freezer_change_state(cgroup, goal_state);
        cgroup_unlock();
        return retval;
}

static struct cftype files[] = {
        {
                .name = "state",
                .read_seq_string = freezer_read,
                .write_string = freezer_write,
        },
};

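/*
 * Create the freezer.state control file.  The root cgroup is
 * non-freezable, so it gets no file.
 */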
static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
{
        if (!cgroup->parent)
                return 0;
        return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
}

struct cgroup_subsys freezer_subsys = {
        .name           = "freezer",
        .create         = freezer_create,
        .destroy        = freezer_destroy,
        .populate       = freezer_populate,
        .subsys_id      = freezer_subsys_id,
        .can_attach     = freezer_can_attach,
        .attach         = NULL,
        .fork           = freezer_fork,
        .exit           = NULL,
};