X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=security%2Fdevice_cgroup.c;h=6cf8fd2b79e80df26e142aa94e6fed9d4c3e7015;hb=0b7570e77f7c3abd43107dabc47ea89daf9a1cba;hp=d5c15a7c67c3b5cf6a0d26a93e4c63072d8b85dc;hpb=c012a54ae0b2ee2c73499f54596e0f5257288fec;p=safe%2Fjmp%2Flinux-2.6

diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index d5c15a7..6cf8fd2 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -1,5 +1,5 @@
 /*
- * dev_cgroup.c - device cgroup subsystem
+ * device_cgroup.c - device cgroup subsystem
  *
  * Copyright 2007 IBM Corp
  */
@@ -10,6 +10,8 @@
 #include
 #include
 #include
+#include
+#include
 
 #define ACC_MKNOD 1
 #define ACC_READ 2
@@ -20,20 +22,12 @@
 #define DEV_CHAR 2
 #define DEV_ALL 4  /* this represents all devices */
 
+static DEFINE_MUTEX(devcgroup_mutex);
+
 /*
  * whitelist locking rules:
- * cgroup_lock() cannot be taken under dev_cgroup->lock.
- * dev_cgroup->lock can be taken with or without cgroup_lock().
- *
- * modifications always require cgroup_lock
- * modifications to a list which is visible require the
- *   dev_cgroup->lock *and* cgroup_lock()
- * walking the list requires dev_cgroup->lock or cgroup_lock().
- *
- * reasoning: dev_whitelist_copy() needs to kmalloc, so needs
- *   a mutex, which the cgroup_lock() is.  Since modifying
- *   a visible list requires both locks, either lock can be
- *   taken for walking the list.
+ * hold devcgroup_mutex for update/read.
+ * hold rcu_read_lock() for read.
  */
 
 struct dev_whitelist_item {
@@ -47,7 +41,6 @@ struct dev_whitelist_item {
 struct dev_cgroup {
 	struct cgroup_subsys_state css;
 	struct list_head whitelist;
-	spinlock_t lock;
 };
 
 static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
@@ -68,7 +61,8 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
 struct cgroup_subsys devices_subsys;
 
 static int devcgroup_can_attach(struct cgroup_subsys *ss,
-		struct cgroup *new_cgroup, struct task_struct *task)
+		struct cgroup *new_cgroup, struct task_struct *task,
+		bool threadgroup)
 {
 	if (current != task && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -77,7 +71,7 @@ static int devcgroup_can_attach(struct cgroup_subsys *ss,
 }
 
 /*
- * called under cgroup_lock()
+ * called under devcgroup_mutex
  */
 static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
 {
@@ -102,8 +96,7 @@ free_and_exit:
 
 /* Stupid prototype - don't bother combining existing entries */
 /*
- * called under cgroup_lock()
- * since the list is visible to other tasks, we need the spinlock also
+ * called under devcgroup_mutex
  */
 static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
 			struct dev_whitelist_item *wh)
@@ -114,7 +107,6 @@ static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
 	if (!whcopy)
 		return -ENOMEM;
 
-	spin_lock(&dev_cgroup->lock);
 	list_for_each_entry(walk, &dev_cgroup->whitelist, list) {
 		if (walk->type != wh->type)
 			continue;
@@ -130,7 +122,6 @@ static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
 
 	if (whcopy != NULL)
 		list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
-	spin_unlock(&dev_cgroup->lock);
 	return 0;
 }
 
@@ -143,15 +134,13 @@ static void whitelist_item_free(struct rcu_head *rcu)
 }
 
 /*
- * called under cgroup_lock()
- * since the list is visible to other tasks, we need the spinlock also
+ * called under devcgroup_mutex
 */
 static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
 			struct dev_whitelist_item *wh)
 {
 	struct dev_whitelist_item *walk, *tmp;
 
-	spin_lock(&dev_cgroup->lock);
 	list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
 		if (walk->type == DEV_ALL)
 			goto remove;
@@ -169,7 +158,6 @@ remove:
 			call_rcu(&walk->rcu, whitelist_item_free);
 		}
 	}
-	spin_unlock(&dev_cgroup->lock);
 }
 
 /*
@@ -201,15 +189,16 @@ static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
 		list_add(&wh->list, &dev_cgroup->whitelist);
 	} else {
 		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
+		mutex_lock(&devcgroup_mutex);
 		ret = dev_whitelist_copy(&dev_cgroup->whitelist,
 				&parent_dev_cgroup->whitelist);
+		mutex_unlock(&devcgroup_mutex);
 		if (ret) {
 			kfree(dev_cgroup);
 			return ERR_PTR(ret);
 		}
 	}
 
-	spin_lock_init(&dev_cgroup->lock);
 	return &dev_cgroup->css;
 }
 
@@ -290,7 +279,7 @@ static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
  * does the access granted to dev_cgroup c contain the access
  * requested in whitelist item refwh.
  * return 1 if yes, 0 if no.
- * call with c->lock held
+ * call with devcgroup_mutex held
  */
 static int may_access_whitelist(struct dev_cgroup *c,
 				struct dev_whitelist_item *refwh)
@@ -325,15 +314,11 @@ static int parent_has_perm(struct dev_cgroup *childcg,
 {
 	struct cgroup *pcg = childcg->css.cgroup->parent;
 	struct dev_cgroup *parent;
-	int ret;
 
 	if (!pcg)
 		return 1;
 	parent = cgroup_to_devcgroup(pcg);
-	spin_lock(&parent->lock);
-	ret = may_access_whitelist(parent, wh);
-	spin_unlock(&parent->lock);
-	return ret;
+	return may_access_whitelist(parent, wh);
 }
 
 /*
@@ -352,7 +337,6 @@ static int parent_has_perm(struct dev_cgroup *childcg,
 static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 				   int filetype, const char *buffer)
 {
-	struct dev_cgroup *cur_devcgroup;
 	const char *b;
 	char *endp;
 	int count;
@@ -361,8 +345,6 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	cur_devcgroup = task_devcgroup(current);
-
 	memset(&wh, 0, sizeof(wh));
 	b = buffer;
 
@@ -450,11 +432,11 @@ static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
 				  const char *buffer)
 {
 	int retval;
-	if (!cgroup_lock_live_group(cgrp))
-		return -ENODEV;
+
+	mutex_lock(&devcgroup_mutex);
 	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
 					 cft->private, buffer);
-	cgroup_unlock();
+	mutex_unlock(&devcgroup_mutex);
 	return retval;
 }
 
@@ -509,7 +491,7 @@ int devcgroup_inode_permission(struct inode *inode, int mask)
 
 	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
 		if (wh->type & DEV_ALL)
-			goto acc_check;
+			goto found;
 		if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
 			continue;
 		if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
@@ -518,11 +500,12 @@ int devcgroup_inode_permission(struct inode *inode, int mask)
 			continue;
 		if (wh->minor != ~0 && wh->minor != iminor(inode))
 			continue;
-acc_check:
+
 		if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
 			continue;
 		if ((mask & MAY_READ) && !(wh->access & ACC_READ))
 			continue;
+found:
 		rcu_read_unlock();
 		return 0;
 	}
@@ -537,13 +520,16 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
 	struct dev_cgroup *dev_cgroup;
 	struct dev_whitelist_item *wh;
 
+	if (!S_ISBLK(mode) && !S_ISCHR(mode))
+		return 0;
+
 	rcu_read_lock();
 
 	dev_cgroup = task_devcgroup(current);
 
-	list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
+	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
 		if (wh->type & DEV_ALL)
-			goto acc_check;
+			goto found;
 		if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
 			continue;
 		if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
@@ -552,9 +538,10 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
 			continue;
 		if (wh->minor != ~0 && wh->minor != MINOR(dev))
 			continue;
-acc_check:
+
 		if (!(wh->access & ACC_MKNOD))
 			continue;
+found:
 		rcu_read_unlock();
 		return 0;
 	}
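
The locking scheme this diff converges on is the common RCU-list pattern: updates are serialized on a single mutex (devcgroup_mutex) and use the _rcu list helpers, while lookups on hot paths walk the whitelist under rcu_read_lock() without taking any lock, and removed entries are freed through call_rcu() once every in-flight reader is done. As a rough self-contained illustration of that pattern only (not part of the patch above; all names such as demo_item, demo_list and demo_mutex are invented for the example), a minimal kernel-module sketch could look like this:

	/* demo_rculist.c - illustrative sketch of "mutex for writers, RCU for readers" */
	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/mutex.h>

	struct demo_item {
		int value;
		struct list_head list;
		struct rcu_head rcu;
	};

	static LIST_HEAD(demo_list);
	static DEFINE_MUTEX(demo_mutex);	/* serializes all list updates */

	/* update side: take the mutex and use the _rcu list helpers */
	static int demo_add(int value)
	{
		struct demo_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

		if (!item)
			return -ENOMEM;
		item->value = value;

		mutex_lock(&demo_mutex);
		list_add_tail_rcu(&item->list, &demo_list);
		mutex_unlock(&demo_mutex);
		return 0;
	}

	static void demo_item_free(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct demo_item, rcu));
	}

	static void demo_del(int value)
	{
		struct demo_item *item, *tmp;

		mutex_lock(&demo_mutex);
		list_for_each_entry_safe(item, tmp, &demo_list, list) {
			if (item->value != value)
				continue;
			list_del_rcu(&item->list);
			/* defer the kfree until current readers have finished */
			call_rcu(&item->rcu, demo_item_free);
		}
		mutex_unlock(&demo_mutex);
	}

	/* read side: no mutex, just an RCU read-side critical section */
	static bool demo_contains(int value)
	{
		struct demo_item *item;
		bool found = false;

		rcu_read_lock();
		list_for_each_entry_rcu(item, &demo_list, list) {
			if (item->value == value) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}

	static int __init demo_init(void)
	{
		int ret = demo_add(1);

		if (!ret)
			pr_info("demo: contains(1)=%d\n", demo_contains(1));
		return ret;
	}

	static void __exit demo_exit(void)
	{
		demo_del(1);
		rcu_barrier();	/* wait for pending call_rcu() callbacks before unload */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The benefit of this split, and the reason the patch drops both cgroup_lock() and the per-cgroup spinlock, is that readers never contend on a lock; writers only contend with each other on the mutex, and deferred freeing keeps removed entries valid for readers that are still traversing the list.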