blkio: Add io_queued and avg_queue_size stats
block/blk-cgroup.c
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
        /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
        .subsys_id = blkio_subsys_id,
#endif
        .use_id = 1,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
        spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
                                bool sync)
{
        if (direction)
                stat[BLKIO_STAT_WRITE] += add;
        else
                stat[BLKIO_STAT_READ] += add;
        if (sync)
                stat[BLKIO_STAT_SYNC] += add;
        else
                stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
        if (direction) {
                BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
                stat[BLKIO_STAT_WRITE]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_READ] == 0);
                stat[BLKIO_STAT_READ]--;
        }
        if (sync) {
                BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
                stat[BLKIO_STAT_SYNC]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
                stat[BLKIO_STAT_ASYNC]--;
        }
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
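/*
 * Sample the current queue depth (queued reads + writes) when this group
 * is made the active one; avg_queue_size_sum / avg_queue_size_samples is
 * later reported through blkio.avg_queue_size.
 */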
void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->avg_queue_size_sum +=
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
        stats->avg_queue_size_samples++;
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats);
#endif

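/* Account a request added to the group's queue; this backs blkio.io_queued. */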
void blkiocg_update_request_add_stats(struct blkio_group *blkg,
                        struct blkio_group *curr_blkg, bool direction,
                        bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
                        sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats);

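/* Undo the io_queued accounting when a request is removed from the queue. */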
void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
                                                bool direction, bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
                                        direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats);

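/* Charge the disk time used by this group; exported as blkio.time. */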
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkg->stats.time += time;
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

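/*
 * Called when a request is dispatched to the device: accounts the sectors
 * moved plus the io_serviced and io_service_bytes counters.
 */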
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                                uint64_t bytes, bool direction, bool sync)
{
        struct blkio_group_stats *stats;
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->sectors += bytes >> 9;
        blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
                        sync);
        blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
                        direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

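/*
 * Called on request completion: io_service_time is completion minus
 * dispatch time, io_wait_time is dispatch minus queueing time, both
 * split by direction and sync/async.
 */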
void blkiocg_update_completion_stats(struct blkio_group *blkg,
        uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
        struct blkio_group_stats *stats;
        unsigned long flags;
        unsigned long long now = sched_clock();

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (time_after64(now, io_start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
                                now - io_start_time, direction, sync);
        if (time_after64(io_start_time, start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
                                io_start_time - start_time, direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

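/* Account a merge into one of the group's requests; backs blkio.io_merged. */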
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
                                        bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
                        sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

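/*
 * Link a policy's per-device blkio_group into the cgroup, keyed by the
 * policy's private pointer, and remember the device number for stats
 * output. Called by the IO policy when it instantiates a group.
 */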
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                        struct blkio_group *blkg, void *key, dev_t dev)
{
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* Need to take css reference ? */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
        blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was already unhashed by the
 * time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (!css)
                goto out;

        blkcg = container_of(css, struct blkio_cgroup, css);
        spin_lock_irqsave(&blkcg->lock, flags);
        if (!hlist_unhashed(&blkg->blkcg_node)) {
                __blkiocg_del_blkio_group(blkg);
                ret = 0;
        }
        spin_unlock_irqrestore(&blkcg->lock, flags);
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        void *__key;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                __key = blkg->key;
                if (__key == key)
                        return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

#define SHOW_FUNCTION(__VAR)                                            \
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                                       struct cftype *cftype)           \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        return (u64)blkcg->__VAR;                                       \
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

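/*
 * blkio.weight: update the cgroup's weight and push it down to every
 * blkio_group through each registered policy's update_group_weight hook.
 */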
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;
        struct blkio_policy_type *blkiop;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                list_for_each_entry(blkiop, &blkio_list, list)
                        blkiop->ops.blkio_update_group_weight_fn(blkg,
                                        blkcg->weight);
        }
        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}

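/*
 * blkio.reset_stats: clear every per-group counter for this cgroup while
 * preserving the currently queued request counts, so blkio.io_queued
 * stays in sync with the requests that are still outstanding.
 */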
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;
        uint64_t queued[BLKIO_STAT_TOTAL];
        int i;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                spin_lock(&blkg->stats_lock);
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        queued[i] = blkg->stats.stat_arr[BLKIO_STAT_QUEUED][i];
                memset(&blkg->stats, 0, sizeof(struct blkio_group_stats));
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        blkg->stats.stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
                spin_unlock(&blkg->stats_lock);
        }
        spin_unlock_irq(&blkcg->lock);
        return 0;
}

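/*
 * Build the "major:minor [Read|Write|Sync|Async|Total]" key used in the
 * map-style stat files; with diskname_only, only "major:minor" is emitted.
 */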
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
                                int chars_left, bool diskname_only)
{
        snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
        chars_left -= strlen(str);
        if (chars_left <= 0) {
                printk(KERN_WARNING
                        "Possibly incorrect cgroup stat display format\n");
                return;
        }
        if (diskname_only)
                return;
        switch (type) {
        case BLKIO_STAT_READ:
                strlcat(str, " Read", chars_left);
                break;
        case BLKIO_STAT_WRITE:
                strlcat(str, " Write", chars_left);
                break;
        case BLKIO_STAT_SYNC:
                strlcat(str, " Sync", chars_left);
                break;
        case BLKIO_STAT_ASYNC:
                strlcat(str, " Async", chars_left);
                break;
        case BLKIO_STAT_TOTAL:
                strlcat(str, " Total", chars_left);
                break;
        default:
                strlcat(str, " Invalid", chars_left);
        }
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
                                struct cgroup_map_cb *cb, dev_t dev)
{
        blkio_get_key_name(0, dev, str, chars_left, true);
        cb->fill(cb, str, val);
        return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
                struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
        uint64_t disk_total;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.time, cb, dev);
        if (type == BLKIO_STAT_SECTORS)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
                uint64_t sum = blkg->stats.avg_queue_size_sum;
                uint64_t samples = blkg->stats.avg_queue_size_samples;
                if (samples)
                        do_div(sum, samples);
                else
                        sum = 0;
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
        }
        if (type == BLKIO_STAT_DEQUEUE)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                        blkg->stats.dequeue, cb, dev);
#endif

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
                        sub_type++) {
                blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
                cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
        }
        disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
                        blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
        blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

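/*
 * Generate the read handler for a per-device, map-style stat file: walk
 * every blkio_group in the cgroup, emit its counters keyed by device, and
 * optionally append a cgroup-wide "Total" line.
 */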
#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)                \
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                struct cftype *cftype, struct cgroup_map_cb *cb)        \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
        struct blkio_group *blkg;                                       \
        struct hlist_node *n;                                           \
        uint64_t cgroup_total = 0;                                      \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        rcu_read_lock();                                                \
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
                if (blkg->dev) {                                        \
                        spin_lock_irq(&blkg->stats_lock);               \
                        cgroup_total += blkio_get_stat(blkg, cb,        \
                                                blkg->dev, type);       \
                        spin_unlock_irq(&blkg->stats_lock);             \
                }                                                       \
        }                                                               \
        if (show_total)                                                 \
                cb->fill(cb, "Total", cgroup_total);                    \
        rcu_read_unlock();                                              \
        cgroup_unlock();                                                \
        return 0;                                                       \
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
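/*
 * Illustrative sketch of what the map-style files above produce when read
 * from userspace; the mount point, group name, device numbers and values
 * are made up:
 *
 *      # cat /cgroup/blkio/grp1/blkio.io_queued
 *      8:16 Read 0
 *      8:16 Write 2
 *      8:16 Sync 1
 *      8:16 Async 1
 *      8:16 Total 2
 *      Total 2
 *
 * blkio.avg_queue_size (CONFIG_DEBUG_BLK_CGROUP only) reports a single
 * "8:16 <value>" line, i.e. avg_queue_size_sum / avg_queue_size_samples.
 */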

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                        unsigned long dequeue)
{
        blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif

struct cftype blkio_files[] = {
        {
                .name = "weight",
                .read_u64 = blkiocg_weight_read,
                .write_u64 = blkiocg_weight_write,
        },
        {
                .name = "time",
                .read_map = blkiocg_time_read,
        },
        {
                .name = "sectors",
                .read_map = blkiocg_sectors_read,
        },
        {
                .name = "io_service_bytes",
                .read_map = blkiocg_io_service_bytes_read,
        },
        {
                .name = "io_serviced",
                .read_map = blkiocg_io_serviced_read,
        },
        {
                .name = "io_service_time",
                .read_map = blkiocg_io_service_time_read,
        },
        {
                .name = "io_wait_time",
                .read_map = blkiocg_io_wait_time_read,
        },
        {
                .name = "io_merged",
                .read_map = blkiocg_io_merged_read,
        },
        {
                .name = "io_queued",
                .read_map = blkiocg_io_queued_read,
        },
        {
                .name = "reset_stats",
                .write_u64 = blkiocg_reset_stats,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "avg_queue_size",
                .read_map = blkiocg_avg_queue_size_read,
        },
        {
                .name = "dequeue",
                .read_map = blkiocg_dequeue_read,
        },
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}

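/*
 * cgroup destroy callback: unhash every blkio_group, tell each registered
 * policy to unlink its group, then free the css id and, for non-root
 * groups, the blkio_cgroup itself.
 */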
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        void *key;
        struct blkio_policy_type *blkiop;

        rcu_read_lock();
remove_entry:
        spin_lock_irqsave(&blkcg->lock, flags);

        if (hlist_empty(&blkcg->blkg_list)) {
                spin_unlock_irqrestore(&blkcg->lock, flags);
                goto done;
        }

        blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                                blkcg_node);
        key = rcu_dereference(blkg->key);
        __blkiocg_del_blkio_group(blkg);

        spin_unlock_irqrestore(&blkcg->lock, flags);

        /*
         * This blkio_group is being unlinked as the associated cgroup is
         * going away. Let all the IO controlling policies know about this
         * event.
         *
         * Currently this is a static call to one IO controlling policy.
         * Once we have more policies in place, we need some dynamic
         * registration of callback functions.
         */
        spin_lock(&blkio_list_lock);
        list_for_each_entry(blkiop, &blkio_list, list)
                blkiop->ops.blkio_unlink_group_fn(key, blkg);
        spin_unlock(&blkio_list_lock);
        goto remove_entry;
done:
        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}

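/*
 * cgroup create callback: the root blkio_cgroup is static, children get a
 * fresh blkio_cgroup with the default weight. Only the root and its direct
 * children are allowed.
 */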
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg, *parent_blkcg;

        if (!cgroup->parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        /* Currently we do not support hierarchies deeper than two levels (0,1) */
        parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
        if (css_depth(&parent_blkcg->css) > 0)
                return ERR_PTR(-EINVAL);

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
                                struct cgroup *cgroup, struct task_struct *tsk,
                                bool threadgroup)
{
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                ret = -EINVAL;
        task_unlock(tsk);

        return ret;
}

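/*
 * cgroup attach callback: flag the task's io_context so the IO policy
 * notices the cgroup change and moves the task's IO to the new group.
 */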
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                                struct cgroup *prev, struct task_struct *tsk,
                                bool threadgroup)
{
        struct io_context *ioc;

        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc)
                ioc->cgroup_changed = 1;
        task_unlock(tsk);
}

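/*
 * IO control policies (e.g. the CFQ group scheduling code) register here;
 * weight updates and group unlinking are fanned out over blkio_list.
 */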
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_add_tail(&blkiop->list, &blkio_list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_del_init(&blkiop->list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
        return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
        cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");