devcgroup: make a helper to convert cgroup_subsys_state to dev_cgroup
security/device_cgroup.c
/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

/*
 * whitelist locking rules:
 * cgroup_lock() cannot be taken under dev_cgroup->lock.
 * dev_cgroup->lock can be taken with or without cgroup_lock().
 *
 * modifications always require cgroup_lock()
 * modifications to a list which is visible require the
 *   dev_cgroup->lock *and* cgroup_lock()
 * walking the list requires dev_cgroup->lock or cgroup_lock().
 *
 * reasoning: dev_whitelist_copy() needs to kmalloc, so needs
 *   a mutex, which the cgroup_lock() is.  Since modifying
 *   a visible list requires both locks, either lock can be
 *   taken for walking the list.
 */

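/*
 * A single whitelist entry.  A major or minor of ~0 acts as a wildcard
 * matching any device number; type is DEV_BLOCK, DEV_CHAR or DEV_ALL,
 * and access is a bitmask of the ACC_* flags defined above.
 */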
struct dev_whitelist_item {
        u32 major, minor;
        short type;
        short access;
        struct list_head list;
};

struct dev_cgroup {
        struct cgroup_subsys_state css;
        struct list_head whitelist;
        spinlock_t lock;
};

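/*
 * Helpers to convert a cgroup_subsys_state, or the cgroup that owns it,
 * into the dev_cgroup that embeds the state.
 */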
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
        return container_of(s, struct dev_cgroup, css);
}

static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
        return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

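/*
 * Attaching a task to a device cgroup is allowed only for the task
 * itself or for a task with CAP_SYS_ADMIN.
 */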
static int devcgroup_can_attach(struct cgroup_subsys *ss,
                struct cgroup *new_cgroup, struct task_struct *task)
{
        if (current != task && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

/*
 * called under cgroup_lock()
 */
static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
{
        struct dev_whitelist_item *wh, *tmp, *new;

        list_for_each_entry(wh, orig, list) {
                new = kmalloc(sizeof(*wh), GFP_KERNEL);
                if (!new)
                        goto free_and_exit;
                new->major = wh->major;
                new->minor = wh->minor;
                new->type = wh->type;
                new->access = wh->access;
                list_add_tail(&new->list, dest);
        }

        return 0;

free_and_exit:
        list_for_each_entry_safe(wh, tmp, dest, list) {
                list_del(&wh->list);
                kfree(wh);
        }
        return -ENOMEM;
}

/* Stupid prototype - don't bother combining existing entries */
/*
 * called under cgroup_lock()
 * since the list is visible to other tasks, we need the spinlock also
 */
static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
                        struct dev_whitelist_item *wh)
{
        struct dev_whitelist_item *whcopy;

        whcopy = kmalloc(sizeof(*whcopy), GFP_KERNEL);
        if (!whcopy)
                return -ENOMEM;

        memcpy(whcopy, wh, sizeof(*whcopy));
        spin_lock(&dev_cgroup->lock);
        list_add_tail(&whcopy->list, &dev_cgroup->whitelist);
        spin_unlock(&dev_cgroup->lock);
        return 0;
}

/*
 * called under cgroup_lock()
 * since the list is visible to other tasks, we need the spinlock also
 */
static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
                        struct dev_whitelist_item *wh)
{
        struct dev_whitelist_item *walk, *tmp;

        spin_lock(&dev_cgroup->lock);
        list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
                if (walk->type == DEV_ALL)
                        goto remove;
                if (walk->type != wh->type)
                        continue;
                if (walk->major != ~0 && walk->major != wh->major)
                        continue;
                if (walk->minor != ~0 && walk->minor != wh->minor)
                        continue;

remove:
                walk->access &= ~wh->access;
                if (!walk->access) {
                        list_del(&walk->list);
                        kfree(walk);
                }
        }
        spin_unlock(&dev_cgroup->lock);
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
                                                struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
        struct cgroup *parent_cgroup;
        int ret;

        dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
        if (!dev_cgroup)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&dev_cgroup->whitelist);
        parent_cgroup = cgroup->parent;

        if (parent_cgroup == NULL) {
                struct dev_whitelist_item *wh;
                wh = kmalloc(sizeof(*wh), GFP_KERNEL);
                if (!wh) {
                        kfree(dev_cgroup);
                        return ERR_PTR(-ENOMEM);
                }
                wh->minor = wh->major = ~0;
                wh->type = DEV_ALL;
                wh->access = ACC_MKNOD | ACC_READ | ACC_WRITE;
                list_add(&wh->list, &dev_cgroup->whitelist);
        } else {
                parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
                ret = dev_whitelist_copy(&dev_cgroup->whitelist,
                                &parent_dev_cgroup->whitelist);
                if (ret) {
                        kfree(dev_cgroup);
                        return ERR_PTR(ret);
                }
        }

        spin_lock_init(&dev_cgroup->lock);
        return &dev_cgroup->css;
}

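/*
 * Called by the cgroup core when the cgroup is being destroyed: free
 * every whitelist entry and then the dev_cgroup itself.
 */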
static void devcgroup_destroy(struct cgroup_subsys *ss,
                        struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup;
        struct dev_whitelist_item *wh, *tmp;

        dev_cgroup = cgroup_to_devcgroup(cgroup);
        list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
                list_del(&wh->list);
                kfree(wh);
        }
        kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 10
#define ACCLEN 4

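/* Helpers used to format whitelist entries for the "list" file. */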
static void set_access(char *acc, short access)
{
        int idx = 0;
        memset(acc, 0, ACCLEN);
        if (access & ACC_READ)
                acc[idx++] = 'r';
        if (access & ACC_WRITE)
                acc[idx++] = 'w';
        if (access & ACC_MKNOD)
                acc[idx++] = 'm';
}

static char type_to_char(short type)
{
        if (type == DEV_ALL)
                return 'a';
        if (type == DEV_CHAR)
                return 'c';
        if (type == DEV_BLOCK)
                return 'b';
        return 'X';
}

static void set_majmin(char *str, unsigned m)
{
        memset(str, 0, MAJMINLEN);
        if (m == ~0)
                sprintf(str, "*");
        else
                snprintf(str, MAJMINLEN, "%u", m);
}

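/*
 * Back end of the read-only "list" file: print one line per whitelist
 * entry in the form "<type> <major>:<minor> <access>", e.g. "a *:* rwm"
 * for the default root entry.
 */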
static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
                                struct seq_file *m)
{
        struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
        struct dev_whitelist_item *wh;
        char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

        spin_lock(&devcgroup->lock);
        list_for_each_entry(wh, &devcgroup->whitelist, list) {
                set_access(acc, wh->access);
                set_majmin(maj, wh->major);
                set_majmin(min, wh->minor);
                seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
                           maj, min, acc);
        }
        spin_unlock(&devcgroup->lock);

        return 0;
}

/*
 * may_access_whitelist:
 * does the whitelist of dev_cgroup c grant the access requested
 * in whitelist item refwh?
 * return 1 if yes, 0 if no.
 * call with c->lock held
 */
static int may_access_whitelist(struct dev_cgroup *c,
                                       struct dev_whitelist_item *refwh)
{
        struct dev_whitelist_item *whitem;

        list_for_each_entry(whitem, &c->whitelist, list) {
                if (whitem->type & DEV_ALL)
                        return 1;
                if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
                        continue;
                if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR))
                        continue;
                if (whitem->major != ~0 && whitem->major != refwh->major)
                        continue;
                if (whitem->minor != ~0 && whitem->minor != refwh->minor)
                        continue;
                if (refwh->access & (~whitem->access))
                        continue;
                return 1;
        }
        return 0;
}

/*
 * parent_has_perm:
 * when adding a new allow rule to a device whitelist, the rule
 * must be allowed in the parent device cgroup
 */
static int parent_has_perm(struct cgroup *childcg,
                                  struct dev_whitelist_item *wh)
{
        struct cgroup *pcg = childcg->parent;
        struct dev_cgroup *parent;
        int ret;

        if (!pcg)
                return 1;
        parent = cgroup_to_devcgroup(pcg);
        spin_lock(&parent->lock);
        ret = may_access_whitelist(parent, wh);
        spin_unlock(&parent->lock);
        return ret;
}

/*
 * Modify the whitelist using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the whitelist.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
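/*
 * Rules written to the "allow" and "deny" files have the form
 * "<type> <major>:<minor> <access>": type is 'a' (all), 'b' (block) or
 * 'c' (char), major and minor are decimal numbers or '*' for any, and
 * access is any combination of 'r', 'w' and 'm'.  For example, writing
 * "c 1:3 mr" to the allow file grants read and mknod access to the
 * character device 1:3; writing the same string to the deny file takes
 * that access away again.
 */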
static ssize_t devcgroup_access_write(struct cgroup *cgroup, struct cftype *cft,
                                struct file *file, const char __user *userbuf,
                                size_t nbytes, loff_t *ppos)
{
        struct cgroup *cur_cgroup;
        struct dev_cgroup *devcgroup, *cur_devcgroup;
        int filetype = cft->private;
        char *buffer, *b;
        int retval = 0, count;
        struct dev_whitelist_item wh;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        devcgroup = cgroup_to_devcgroup(cgroup);
        cur_cgroup = task_cgroup(current, devices_subsys.subsys_id);
        cur_devcgroup = cgroup_to_devcgroup(cur_cgroup);

        buffer = kmalloc(nbytes+1, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        if (copy_from_user(buffer, userbuf, nbytes)) {
                retval = -EFAULT;
                goto out1;
        }
        buffer[nbytes] = 0;     /* nul-terminate */

        cgroup_lock();
        if (cgroup_is_removed(cgroup)) {
                retval = -ENODEV;
                goto out2;
        }

        memset(&wh, 0, sizeof(wh));
        b = buffer;

        switch (*b) {
        case 'a':
                wh.type = DEV_ALL;
                wh.access = ACC_MASK;
                goto handle;
        case 'b':
                wh.type = DEV_BLOCK;
                break;
        case 'c':
                wh.type = DEV_CHAR;
                break;
        default:
                retval = -EINVAL;
                goto out2;
        }
        b++;
        if (!isspace(*b)) {
                retval = -EINVAL;
                goto out2;
        }
        b++;
        if (*b == '*') {
                wh.major = ~0;
                b++;
        } else if (isdigit(*b)) {
                wh.major = 0;
                while (isdigit(*b)) {
                        wh.major = wh.major*10+(*b-'0');
                        b++;
                }
        } else {
                retval = -EINVAL;
                goto out2;
        }
        if (*b != ':') {
                retval = -EINVAL;
                goto out2;
        }
        b++;

        /* read minor */
        if (*b == '*') {
                wh.minor = ~0;
                b++;
        } else if (isdigit(*b)) {
                wh.minor = 0;
                while (isdigit(*b)) {
                        wh.minor = wh.minor*10+(*b-'0');
                        b++;
                }
        } else {
                retval = -EINVAL;
                goto out2;
        }
        if (!isspace(*b)) {
                retval = -EINVAL;
                goto out2;
        }
        for (b++, count = 0; count < 3; count++, b++) {
                switch (*b) {
                case 'r':
                        wh.access |= ACC_READ;
                        break;
                case 'w':
                        wh.access |= ACC_WRITE;
                        break;
                case 'm':
                        wh.access |= ACC_MKNOD;
                        break;
                case '\n':
                case '\0':
                        count = 3;
                        break;
                default:
                        retval = -EINVAL;
                        goto out2;
                }
        }

handle:
        retval = 0;
        switch (filetype) {
        case DEVCG_ALLOW:
                if (!parent_has_perm(cgroup, &wh))
                        retval = -EPERM;
                else
                        retval = dev_whitelist_add(devcgroup, &wh);
                break;
        case DEVCG_DENY:
                dev_whitelist_rm(devcgroup, &wh);
                break;
        default:
                retval = -EINVAL;
                goto out2;
        }

        if (retval == 0)
                retval = nbytes;

out2:
        cgroup_unlock();
out1:
        kfree(buffer);
        return retval;
}

static struct cftype dev_cgroup_files[] = {
        {
                .name = "allow",
                .write = devcgroup_access_write,
                .private = DEVCG_ALLOW,
        },
        {
                .name = "deny",
                .write = devcgroup_access_write,
                .private = DEVCG_DENY,
        },
        {
                .name = "list",
                .read_seq_string = devcgroup_seq_read,
                .private = DEVCG_LIST,
        },
};

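/*
 * Create the "allow", "deny" and "list" control files when the cgroup
 * directory is populated.
 */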
static int devcgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, ss, dev_cgroup_files,
                                        ARRAY_SIZE(dev_cgroup_files));
}

struct cgroup_subsys devices_subsys = {
        .name = "devices",
        .can_attach = devcgroup_can_attach,
        .create = devcgroup_create,
        .destroy = devcgroup_destroy,
        .populate = devcgroup_populate,
        .subsys_id = devices_subsys_id,
};

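/*
 * devcgroup_inode_permission - check whether the current task's device
 * cgroup allows the access requested in @mask (MAY_READ/MAY_WRITE) to
 * the device node @inode.  Returns 0 if the access is allowed and
 * -EPERM otherwise; non-device inodes are always allowed.
 */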
int devcgroup_inode_permission(struct inode *inode, int mask)
{
        struct cgroup *cgroup;
        struct dev_cgroup *dev_cgroup;
        struct dev_whitelist_item *wh;

        dev_t device = inode->i_rdev;
        if (!device)
                return 0;
        if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
                return 0;
        cgroup = task_cgroup(current, devices_subsys.subsys_id);
        dev_cgroup = cgroup_to_devcgroup(cgroup);
        if (!dev_cgroup)
                return 0;

        spin_lock(&dev_cgroup->lock);
        list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
                if (wh->type & DEV_ALL)
                        goto acc_check;
                if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
                        continue;
                if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
                        continue;
                if (wh->major != ~0 && wh->major != imajor(inode))
                        continue;
                if (wh->minor != ~0 && wh->minor != iminor(inode))
                        continue;
acc_check:
                if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
                        continue;
                if ((mask & MAY_READ) && !(wh->access & ACC_READ))
                        continue;
                spin_unlock(&dev_cgroup->lock);
                return 0;
        }
        spin_unlock(&dev_cgroup->lock);

        return -EPERM;
}

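/*
 * devcgroup_inode_mknod - check whether the current task's device
 * cgroup grants mknod (ACC_MKNOD) for the device @dev of type @mode.
 * Returns 0 if the node may be created and -EPERM otherwise.
 */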
int devcgroup_inode_mknod(int mode, dev_t dev)
{
        struct cgroup *cgroup;
        struct dev_cgroup *dev_cgroup;
        struct dev_whitelist_item *wh;

        cgroup = task_cgroup(current, devices_subsys.subsys_id);
        dev_cgroup = cgroup_to_devcgroup(cgroup);
        if (!dev_cgroup)
                return 0;

        spin_lock(&dev_cgroup->lock);
        list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
                if (wh->type & DEV_ALL)
                        goto acc_check;
                if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
                        continue;
                if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
                        continue;
                if (wh->major != ~0 && wh->major != MAJOR(dev))
                        continue;
                if (wh->minor != ~0 && wh->minor != MINOR(dev))
                        continue;
acc_check:
                if (!(wh->access & ACC_MKNOD))
                        continue;
                spin_unlock(&dev_cgroup->lock);
                return 0;
        }
        spin_unlock(&dev_cgroup->lock);
        return -EPERM;
}