/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}
/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}
/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev)
			return pn;
	}

	return NULL;
}
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on the value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
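
/*
 * Example (illustrative): a synchronous write accounted via
 * blkio_add_stat(stat, 1, true, true) bumps both stat[BLKIO_STAT_WRITE]
 * and stat[BLKIO_STAT_SYNC], so the READ/WRITE pair and the SYNC/ASYNC
 * pair each sum to the same event total.
 */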
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
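
/*
 * Only the running sum and sample count are kept here; the average is
 * computed lazily in blkio_get_stat() as avg_queue_size_sum /
 * avg_queue_size_samples (e.g. samples of 2, 4 and 6 queued requests
 * average out to 4).
 */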
void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This is to avoid cases where there are superfluous
	 * timeslice complete events (e.g. forced_dispatch in CFQ) when no
	 * IOs are served, which could otherwise trigger the empty check
	 * incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev);
		/* a per-device rule overrides the cgroup-wide weight */
		if (pn)
			continue;

		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
							blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		/* queued stats are preserved across the reset */
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
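
/*
 * Example (illustrative): for dev 8:16 and BLKIO_STAT_READ this yields the
 * key "8:16 Read"; with diskname_only set it is just "8:16". These keys are
 * what userspace sees in files such as blkio.io_service_bytes.
 */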
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}
SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
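
/*
 * For example, SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0) above
 * expands to blkiocg_time_read(), which walks every blkio_group in the
 * cgroup and emits one "major:minor value" entry per device; it is wired
 * up to the "time" entry in blkio_files[] below.
 */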
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret, i = 0;
	unsigned long major, minor, temp;
	dev_t dev;

	memset(s, 0, sizeof(s));
	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;
		s[i++] = p;
		/* Prevent too many fields from being input */
		if (i == 3)
			break;
	}
	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (!p)
		return -EINVAL;
	major_s = p;
	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;
	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);
	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;
	newpn->dev = dev;

	if (!s[1])
		return -EINVAL;
	ret = strict_strtoul(s[1], 10, &temp);
	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
	    temp > BLKIO_WEIGHT_MAX)
		return -EINVAL;
	newpn->weight = temp;

	return 0;
}
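
/*
 * Example (illustrative): the accepted input format is "major:minor weight",
 * e.g. "8:16 300" sets a per-device weight of 300 for device 8:16, while
 * "8:16 0" removes the rule so the cgroup-wide weight applies again.
 */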
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	int keep_newpn = 0;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev);
	if (!pn) {
		if (newpn->weight != 0) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (newpn->weight == 0) {
		/* weight == 0 means deleting a specific weight */
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	pn->weight = newpn->weight;

update_io_group:
	/* update weight for each cfqg */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (newpn->dev == blkg->dev) {
			list_for_each_entry(blkiop, &blkio_list, list)
				blkiop->ops.blkio_update_group_weight_fn(blkg,
							newpn->weight ?
							newpn->weight :
							blkcg->weight);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	struct blkio_policy_node *pn;

	seq_printf(m, "dev\tweight\n");

	blkcg = cgroup_to_blkio_cgroup(cgrp);
	if (list_empty(&blkcg->policy_list))
		goto out;

	spin_lock_irq(&blkcg->lock);
	list_for_each_entry(pn, &blkcg->policy_list, node) {
		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
			   MINOR(pn->dev), pn->weight);
	}
	spin_unlock_irq(&blkcg->lock);

out:
	return 0;
}
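
/*
 * Example output (illustrative) with a single rule configured:
 *
 *	dev	weight
 *	8:16	300
 */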
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkiocg_weight_device_read,
		.write_string = blkiocg_weight_device_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};
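
/*
 * Example usage from userspace (illustrative; paths assume the blkio
 * controller is mounted at /cgroup/blkio):
 *
 *	echo 500 > /cgroup/blkio/grp1/blkio.weight
 *	echo "8:16 300" > /cgroup/blkio/grp1/blkio.weight_device
 *	cat /cgroup/blkio/grp1/blkio.io_serviced
 *	echo 1 > /cgroup/blkio/grp1/blkio.reset_stats
 */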
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as the associated cgroup
		 * is going away. Let all the IO controlling policies know
		 * about this event.
		 *
		 * Currently this is a static call to one IO controlling
		 * policy. Once we have more policies in place, we need some
		 * dynamic registration of callback function.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
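
/*
 * A minimal sketch of how an IO scheduler hooks into this interface
 * (hypothetical callback names; CFQ does the equivalent from its own
 * init/exit paths when group scheduling is enabled):
 *
 *	static struct blkio_policy_type my_policy = {
 *		.ops = {
 *			.blkio_unlink_group_fn		= my_unlink_group,
 *			.blkio_update_group_weight_fn	= my_update_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&my_policy);	(typically from __init)
 *	blkio_policy_unregister(&my_policy);	(from __exit)
 */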
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");