/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
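
/*
 * blkio can be built as a module (.module = THIS_MODULE); registration with
 * the cgroup core happens through cgroup_load_subsys()/cgroup_unload_subsys()
 * in the init/exit hooks at the bottom of this file.
 */
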
/* Must be called with blkcg->lock held */
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);
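
/*
 * Rough usage sketch for an IO policy (e.g. CFQ): call blkio_group_init()
 * on a freshly allocated blkio_group, then blkiocg_add_blkio_group() to hash
 * it into the owning blkio_cgroup, and later find it again with
 * blkiocg_lookup_group() under rcu_read_lock(). The per-group stat update
 * helpers below assume the group was initialized this way.
 */
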
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
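
/*
 * Each update lands in exactly one of {READ, WRITE} and one of {SYNC, ASYNC}:
 * e.g. a synchronous write of one request bumps both the WRITE and the SYNC
 * buckets by the same amount.
 */
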
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on the value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
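
/*
 * Each call above adds one sample: the currently queued READ + WRITE count.
 * The average reported through the avg_queue_size file is computed later in
 * blkio_get_stat() as avg_queue_size_sum / avg_queue_size_samples.
 */
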
void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This avoids cases where there are superfluous timeslice
	 * complete events (e.g. forced_dispatch in CFQ) when no IOs are
	 * served, which could trigger the empty check incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);

#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
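
/*
 * When CONFIG_DEBUG_BLK_CGROUP is not set, the wait/empty tracking helpers
 * used by blkiocg_update_io_add_stats() become empty stubs, so the common
 * queueing path pays no extra cost for the debug statistics.
 */
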
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
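
/*
 * Per dispatch: sectors is kept in 512-byte units (bytes >> 9), SERVICED
 * counts one request, and SERVICE_BYTES accumulates the request size in
 * bytes, each split by direction and sync/async as in blkio_add_stat().
 */
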
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
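
/*
 * Timeline per request: start_time (queued) -> io_start_time (dispatched to
 * the device) -> now (completed). WAIT_TIME therefore covers time spent
 * queued and SERVICE_TIME covers time spent on the device; intervals that
 * would be negative are simply skipped by the time_after64() checks.
 */
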
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev);
		if (pn)
			continue;

		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
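
/*
 * Writing blkio.weight updates the cgroup-wide default and pushes it to every
 * registered policy, but groups whose device has an explicit per-device rule
 * (a blkio_policy_node) are skipped, so blkio.weight_device overrides keep
 * taking precedence.
 */
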
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
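
/*
 * Reset deliberately preserves the QUEUED counters and the idling/waiting/
 * empty flags (restarting their timestamps at "now"): requests already in
 * flight will still be decremented later, and in-progress idle/wait/empty
 * periods keep accounting from the moment of the reset.
 */
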
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
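
/*
 * For the per-type stats this emits one "major:minor <Read|Write|Sync|Async>"
 * line per bucket plus a "major:minor Total" line (Read + Write), e.g.:
 *	8:16 Read 4096
 *	8:16 Write 0
 *	8:16 Sync 4096
 *	8:16 Async 0
 *	8:16 Total 4096
 */
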
#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int i = 0, ret;
	unsigned long major, minor, temp;
	dev_t dev;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;
		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (!p)
		return -EINVAL;
	major_s = p;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (!s[1])
		return -EINVAL;

	ret = strict_strtoul(s[1], 10, &temp);
	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
	    temp > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	newpn->weight = temp;

	return 0;
}
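
/*
 * The accepted format for blkio.weight_device is "<major>:<minor> <weight>",
 * e.g. "echo 8:16 300 > blkio.weight_device" sets a per-device weight of 300
 * for the 8:16 disk, and a weight of 0 removes the per-device rule again
 * (see blkiocg_weight_device_write() below).
 */
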
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	int keep_newpn = 0;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev);
	if (!pn) {
		if (newpn->weight != 0) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (newpn->weight == 0) {
		/* weight == 0 means deleting a specific weight */
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		kfree(pn);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	pn->weight = newpn->weight;

update_io_group:
	/* update weight for each cfqg */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (newpn->dev == blkg->dev) {
			list_for_each_entry(blkiop, &blkio_list, list)
				blkiop->ops.blkio_update_group_weight_fn(blkg,
					newpn->weight ?
					newpn->weight : blkcg->weight);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	struct blkio_policy_node *pn;

	seq_printf(m, "dev\tweight\n");

	blkcg = cgroup_to_blkio_cgroup(cgrp);
	if (list_empty(&blkcg->policy_list))
		goto out;

	spin_lock_irq(&blkcg->lock);
	list_for_each_entry(pn, &blkcg->policy_list, node) {
		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
			   MINOR(pn->dev), pn->weight);
	}
	spin_unlock_irq(&blkcg->lock);
out:
	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkiocg_weight_device_read,
		.write_string = blkiocg_weight_device_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};
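
/*
 * Each entry above shows up in the cgroup filesystem prefixed with the
 * subsystem name, e.g. blkio.weight, blkio.weight_device, blkio.io_serviced
 * and blkio.reset_stats; the debug files are only present when
 * CONFIG_DEBUG_BLK_CGROUP is enabled.
 */
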
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one IO controlling policy. Once
	 * we have more policies in place, we need some dynamic registration
	 * of callback function.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;

done:
	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);

MODULE_LICENSE("GPL");