4 * Common Block IO controller cgroup interface
6 * Based on ideas and code from CFQ, CFS and BFQ:
7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
9 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
10 * Paolo Valente <paolo.valente@unimore.it>
12 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
13 * Nauman Rafique <nauman@google.com>
16 #include <linux/cgroup.h>
18 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
20 #ifndef CONFIG_BLK_CGROUP
21 /* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
22 extern struct cgroup_subsys blkio_subsys;
23 #define blkio_subsys_id blkio_subsys.subsys_id
27 /* Total time spent (in ns) between request dispatch to the driver and
28 * request completion for IOs done by this cgroup. This may not be
29 * accurate when NCQ is turned on. */
30 BLKIO_STAT_SERVICE_TIME = 0,
31 /* Total bytes transferred */
32 BLKIO_STAT_SERVICE_BYTES,
33 /* Total IOs serviced, post merge */
35 /* Total time spent waiting in scheduler queue in ns */
37 /* Number of IOs merged */
39 /* Number of IOs queued up */
41 /* All the single valued stats go below this */
44 #ifdef CONFIG_DEBUG_BLK_CGROUP
45 BLKIO_STAT_AVG_QUEUE_SIZE,
59 struct cgroup_subsys_state css;
62 struct hlist_head blkg_list;
65 struct blkio_group_stats {
66 /* total disk time and nr sectors dispatched by this group */
69 uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
70 #ifdef CONFIG_DEBUG_BLK_CGROUP
71 /* Sum of number of IOs queued across all samples */
72 uint64_t avg_queue_size_sum;
73 /* Count of samples taken for average */
74 uint64_t avg_queue_size_samples;
75 /* How many times this group has been removed from service tree */
76 unsigned long dequeue;
81 /* An rcu protected unique identifier for the group */
83 struct hlist_node blkcg_node;
84 unsigned short blkcg_id;
85 #ifdef CONFIG_DEBUG_BLK_CGROUP
86 /* Store cgroup path */
89 /* The device MKDEV(major, minor), this group has been created for */
92 /* Need to serialize the stats in the case of reset/update */
93 spinlock_t stats_lock;
94 struct blkio_group_stats stats;
97 typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
98 typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
101 struct blkio_policy_ops {
102 blkio_unlink_group_fn *blkio_unlink_group_fn;
103 blkio_update_group_weight_fn *blkio_update_group_weight_fn;
106 struct blkio_policy_type {
107 struct list_head list;
108 struct blkio_policy_ops ops;
111 /* Blkio controller policy registration */
112 extern void blkio_policy_register(struct blkio_policy_type *);
113 extern void blkio_policy_unregister(struct blkio_policy_type *);
120 struct blkio_policy_type {
/* No-op stub: accepts and ignores the policy; presumably the branch where blk-cgroup is compiled out (enclosing #else is not visible in this fragment). */
123 static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
/* No-op stub counterpart to blkio_policy_register(); ignores its argument. */
124 static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
128 #define BLKIO_WEIGHT_MIN 100
129 #define BLKIO_WEIGHT_MAX 1000
130 #define BLKIO_WEIGHT_DEFAULT 500
132 #ifdef CONFIG_DEBUG_BLK_CGROUP
133 static inline char *blkg_path(struct blkio_group *blkg)
137 void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg);
138 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
139 unsigned long dequeue);
/* Stub: always returns NULL; appears to be the !CONFIG_DEBUG_BLK_CGROUP branch (the #else line is not visible in this fragment). Callers must tolerate a NULL path. */
141 static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
/* Empty stub: active-queue stats are not collected in this configuration. */
142 static inline void blkiocg_update_set_active_queue_stats(
143 struct blkio_group *blkg) {}
/* Empty stub: dequeue count is ignored in this configuration. */
144 static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
145 unsigned long dequeue) {}
148 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
149 extern struct blkio_cgroup blkio_root_cgroup;
150 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
151 extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
152 struct blkio_group *blkg, void *key, dev_t dev);
153 extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
154 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
156 void blkio_group_init(struct blkio_group *blkg);
157 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
159 void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
160 bool direction, bool sync);
161 void blkiocg_update_completion_stats(struct blkio_group *blkg,
162 uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
163 void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
165 void blkiocg_update_request_add_stats(struct blkio_group *blkg,
166 struct blkio_group *curr_blkg, bool direction, bool sync);
167 void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
168 bool direction, bool sync);
/* Stub: always returns NULL; presumably the branch where blk-cgroup is not built (enclosing #else, orig line ~170, is not visible here). Callers must handle NULL. */
171 static inline struct blkio_cgroup *
172 cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
/* Empty stub: no per-group initialization needed in this configuration. */
174 static inline void blkio_group_init(struct blkio_group *blkg) {}
/* Empty stub: group registration is a no-op; all arguments ignored. */
175 static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
176 struct blkio_group *blkg, void *key, dev_t dev) {}
179 blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
/* Stub: lookup always fails (returns NULL) in this configuration. */
181 static inline struct blkio_group *
182 blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
/* Empty stub: timeslice accounting disabled; time argument ignored. */
183 static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
184 unsigned long time) {}
/* Empty stub: dispatch byte/request accounting disabled in this configuration. */
185 static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
186 uint64_t bytes, bool direction, bool sync) {}
187 static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
188 uint64_t start_time, uint64_t io_start_time, bool direction,
/* Empty stub: merged-IO accounting disabled in this configuration. */
190 static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
191 bool direction, bool sync) {}
/* Empty stub: queued-request accounting disabled; all arguments ignored. */
192 static inline void blkiocg_update_request_add_stats(struct blkio_group *blkg,
193 struct blkio_group *curr_blkg, bool direction, bool sync) {}
/* Empty stub: mirror of blkiocg_update_request_add_stats for request removal; does nothing. */
194 static inline void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
195 bool direction, bool sync) {}
197 #endif /* _BLK_CGROUP_H */