blkio: Determine async workload length based on total number of queues
block/blk-cgroup.c
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include "blk-cgroup.h"

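/*
 * Hooks into CFQ, currently the only I/O policy implementing group
 * scheduling; resolved at link time rather than via dynamic registration.
 */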
extern void cfq_unlink_blkio_group(void *, struct blkio_group *);
extern void cfq_update_blkio_group_weight(struct blkio_group *, unsigned int);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}

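/*
 * Charge service time and sectors to a group; called by the I/O policy
 * as it accounts the I/O done on behalf of the group.
 */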
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
                        unsigned long time, unsigned long sectors)
{
        blkg->time += time;
        blkg->sectors += sectors;
}

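/*
 * Link a blkio_group into the cgroup's list. @key is the opaque
 * per-device key supplied by the I/O policy; lookups compare against it
 * under RCU, so it is published with rcu_assign_pointer().
 */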
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                        struct blkio_group *blkg, void *key, dev_t dev)
{
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* Need to take css reference ? */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
        blkg->dev = dev;
}

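/* Caller must hold blkcg->lock. */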
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time we
 * got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (!css)
                goto out;

        blkcg = container_of(css, struct blkio_cgroup, css);
        spin_lock_irqsave(&blkcg->lock, flags);
        if (!hlist_unhashed(&blkg->blkcg_node)) {
                __blkiocg_del_blkio_group(blkg);
                ret = 0;
        }
        spin_unlock_irqrestore(&blkcg->lock, flags);
out:
        rcu_read_unlock();
        return ret;
}

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        void *__key;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                __key = blkg->key;
                if (__key == key)
                        return blkg;
        }

        return NULL;
}

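/*
 * Generate a read_u64 handler for a simple per-cgroup field; used below
 * for the "weight" file.
 */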
#define SHOW_FUNCTION(__VAR)                                            \
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                                       struct cftype *cftype)           \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        return (u64)blkcg->__VAR;                                       \
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

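/*
 * Set the cgroup weight and push the new value down to every
 * blkio_group (one per device) via the CFQ hook.
 */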
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                cfq_update_blkio_group_weight(blkg, blkcg->weight);
        spin_unlock_irq(&blkcg->lock);
        return 0;
}

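/*
 * Generate a seq_file read handler that prints "major:minor <value>"
 * for each blkio_group (device) in the cgroup, one device per line.
 */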
#define SHOW_FUNCTION_PER_GROUP(__VAR)                                  \
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                        struct cftype *cftype, struct seq_file *m)      \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
        struct blkio_group *blkg;                                       \
        struct hlist_node *n;                                           \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        rcu_read_lock();                                                \
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
                if (blkg->dev)                                          \
                        seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),  \
                                 MINOR(blkg->dev), blkg->__VAR);        \
        }                                                               \
        rcu_read_unlock();                                              \
        cgroup_unlock();                                                \
        return 0;                                                       \
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

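/*
 * Debug statistic: counts how many times the I/O policy dequeued this
 * group from its service tree.
 */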
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
                        unsigned long dequeue)
{
        blkg->dequeue += dequeue;
}
#endif

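/* Control files exposed in each blkio cgroup directory. */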
struct cftype blkio_files[] = {
        {
                .name = "weight",
                .read_u64 = blkiocg_weight_read,
                .write_u64 = blkiocg_weight_write,
        },
        {
                .name = "time",
                .read_seq_string = blkiocg_time_read,
        },
        {
                .name = "sectors",
                .read_seq_string = blkiocg_sectors_read,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "dequeue",
                .read_seq_string = blkiocg_dequeue_read,
        },
#endif
};

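/* Register the control files above when a new cgroup is created. */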
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}

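/*
 * Cgroup teardown: unlink each blkio_group under blkcg->lock, let the
 * I/O policy release it outside the lock, then free the per-cgroup state.
 */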
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        void *key;

        rcu_read_lock();
remove_entry:
        spin_lock_irqsave(&blkcg->lock, flags);

        if (hlist_empty(&blkcg->blkg_list)) {
                spin_unlock_irqrestore(&blkcg->lock, flags);
                goto done;
        }

        blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                                blkcg_node);
        key = rcu_dereference(blkg->key);
        __blkiocg_del_blkio_group(blkg);

        spin_unlock_irqrestore(&blkcg->lock, flags);

        /*
         * This blkio_group is being unlinked as the associated cgroup is
         * going away. Let all the IO controlling policies know about this
         * event.
         *
         * Currently this is a static call to one IO controlling policy.
         * Once we have more policies in place, we will need some dynamic
         * registration of callback functions.
         */
        cfq_unlink_blkio_group(key, blkg);
        goto remove_entry;
done:
        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg, *parent_blkcg;

        if (!cgroup->parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        /* Currently we do not support a hierarchy deeper than two levels (0, 1) */
        parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
        if (css_depth(&parent_blkcg->css) > 0)
                return ERR_PTR(-EINVAL);

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
                                struct cgroup *cgroup, struct task_struct *tsk,
                                bool threadgroup)
{
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                ret = -EINVAL;
        task_unlock(tsk);

        return ret;
}

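/*
 * Flag the io_context so the I/O scheduler re-associates the task's
 * queues with the new cgroup on its next I/O submission.
 */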
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                                struct cgroup *prev, struct task_struct *tsk,
                                bool threadgroup)
{
        struct io_context *ioc;

        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc)
                ioc->cgroup_changed = 1;
        task_unlock(tsk);
}

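/* The blkio cgroup subsystem operations table. */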
struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
        .subsys_id = blkio_subsys_id,
        .use_id = 1,
};