X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=blobdiff_plain;f=kernel%2Fuser.c;h=46d0165ca70c6b10240256fb1ccc2b3420c6c91e;hp=d1ae2349347e1776f99f572bb355a0a8a6435443;hb=e071041be037eca208b62b84469a06bdfc692bea;hpb=4021cb279a532728c3208a16b9b09b0ca8016850

diff --git a/kernel/user.c b/kernel/user.c
index d1ae234..46d0165 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -14,64 +14,336 @@
 #include <linux/bitops.h>
 #include <linux/key.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/user_namespace.h>
+#include "cred-internals.h"
+
+struct user_namespace init_user_ns = {
+	.kref = {
+		.refcount	= ATOMIC_INIT(2),
+	},
+	.creator = &root_user,
+};
+EXPORT_SYMBOL_GPL(init_user_ns);
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
  * when changing user ID's (ie setuid() and friends).
  */
-#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 8)
-#define UIDHASH_SZ	(1 << UIDHASH_BITS)
 #define UIDHASH_MASK		(UIDHASH_SZ - 1)
 #define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))
+#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
 
-static kmem_cache_t *uid_cachep;
-static struct list_head uidhash_table[UIDHASH_SZ];
+static struct kmem_cache *uid_cachep;
 
 /*
  * The uidhash_lock is mostly taken from process context, but it is
  * occasionally also taken from softirq/tasklet context, when
  * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error - we'll run
+ * softirq callbacks, and they can unconditionally enable interrupts, and
+ * the caller of free_uid() didn't expect that..
 */
 static DEFINE_SPINLOCK(uidhash_lock);
 
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
 struct user_struct root_user = {
-	.__count	= ATOMIC_INIT(1),
+	.__count	= ATOMIC_INIT(2),
 	.processes	= ATOMIC_INIT(1),
 	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
-	.mq_bytes	= 0,
 	.locked_shm	= 0,
-#ifdef CONFIG_KEYS
-	.uid_keyring	= &root_user_keyring,
-	.session_keyring = &root_session_keyring,
+	.user_ns	= &init_user_ns,
+#ifdef CONFIG_USER_SCHED
+	.tg	= &init_task_group,
 #endif
 };
 
 /*
  * These routines must be called with the uidhash spinlock held!
 */
-static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
+static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
+{
+	hlist_add_head(&up->uidhash_node, hashent);
+}
+
+static void uid_hash_remove(struct user_struct *up)
+{
+	hlist_del_init(&up->uidhash_node);
+	put_user_ns(up->user_ns);
+}
+
+#ifdef CONFIG_USER_SCHED
+
+static void sched_destroy_user(struct user_struct *up)
+{
+	sched_destroy_group(up->tg);
+}
+
+static int sched_create_user(struct user_struct *up)
+{
+	int rc = 0;
+
+	up->tg = sched_create_group(&root_task_group);
+	if (IS_ERR(up->tg))
+		rc = -ENOMEM;
+
+	set_tg_uid(up);
+
+	return rc;
+}
+
+#else	/* CONFIG_USER_SCHED */
+
+static void sched_destroy_user(struct user_struct *up) { }
+static int sched_create_user(struct user_struct *up) { return 0; }
+
+#endif	/* CONFIG_USER_SCHED */
+
+#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
+
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+	struct user_struct *user;
+	struct hlist_node *h;
+
+	hlist_for_each_entry(user, h, hashent, uidhash_node) {
+		if (user->uid == uid) {
+			/* possibly resurrect an "almost deleted" object */
+			if (atomic_inc_return(&user->__count) == 1)
+				cancel_delayed_work(&user->work);
+			return user;
+		}
+	}
+
+	return NULL;
+}
+
+static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
+static DEFINE_MUTEX(uids_mutex);
+
+static inline void uids_mutex_lock(void)
+{
+	mutex_lock(&uids_mutex);
+}
+
+static inline void uids_mutex_unlock(void)
+{
+	mutex_unlock(&uids_mutex);
+}
+
+/* uid directory attributes */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static ssize_t cpu_shares_show(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       char *buf)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
+}
+
+static ssize_t cpu_shares_store(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+	unsigned long shares;
+	int rc;
+
+	sscanf(buf, "%lu", &shares);
+
+	rc = sched_group_set_shares(up->tg, shares);
+
+	return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_share_attr =
+	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   char *buf)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
+}
+
+static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+	unsigned long rt_runtime;
+	int rc;
+
+	sscanf(buf, "%ld", &rt_runtime);
+
+	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);
+
+	return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_rt_runtime_attr =
+	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
+
+static ssize_t cpu_rt_period_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
+}
+
+static ssize_t cpu_rt_period_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t size)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+	unsigned long rt_period;
+	int rc;
+
+	sscanf(buf, "%lu", &rt_period);
+
+	rc = sched_group_set_rt_period(up->tg, rt_period);
+
+	return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_rt_period_attr =
+	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
+#endif
+
+/* default attributes per uid directory */
+static struct attribute *uids_attributes[] = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	&cpu_share_attr.attr,
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+	&cpu_rt_runtime_attr.attr,
+	&cpu_rt_period_attr.attr,
+#endif
+	NULL
+};
+
+/* the lifetime of user_struct is not managed by the core (now) */
+static void uids_release(struct kobject *kobj)
+{
+	return;
+}
+
+static struct kobj_type uids_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = uids_attributes,
+	.release = uids_release,
+};
+
+/*
+ * Create /sys/kernel/uids/<uid>/cpu_share file for this user
+ * We do not create this file for users in a user namespace (until
+ * sysfs tagging is implemented).
+ *
+ * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
+ */
+static int uids_user_create(struct user_struct *up)
+{
+	struct kobject *kobj = &up->kobj;
+	int error;
+
+	memset(kobj, 0, sizeof(struct kobject));
+	if (up->user_ns != &init_user_ns)
+		return 0;
+	kobj->kset = uids_kset;
+	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
+	if (error) {
+		kobject_put(kobj);
+		goto done;
+	}
+
+	kobject_uevent(kobj, KOBJ_ADD);
+done:
+	return error;
+}
+
+/* create these entries in sysfs:
+ * 	"/sys/kernel/uids" directory
+ * 	"/sys/kernel/uids/0" directory (for root user)
+ * 	"/sys/kernel/uids/0/cpu_share" file (for root user)
+ */
+int __init uids_sysfs_init(void)
 {
-	list_add(&up->uidhash_list, hashent);
+	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
+	if (!uids_kset)
+		return -ENOMEM;
+
+	return uids_user_create(&root_user);
 }
 
-static inline void uid_hash_remove(struct user_struct *up)
+/* delayed work function to remove sysfs directory for a user and free up
+ * corresponding structures.
+ */
+static void cleanup_user_struct(struct work_struct *w)
 {
-	list_del(&up->uidhash_list);
+	struct user_struct *up = container_of(w, struct user_struct, work.work);
+	unsigned long flags;
+	int remove_user = 0;
+
+	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
+	 * atomic.
+	 */
+	uids_mutex_lock();
+
+	spin_lock_irqsave(&uidhash_lock, flags);
+	if (atomic_read(&up->__count) == 0) {
+		uid_hash_remove(up);
+		remove_user = 1;
+	}
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+
+	if (!remove_user)
+		goto done;
+
+	if (up->user_ns == &init_user_ns) {
+		kobject_uevent(&up->kobj, KOBJ_REMOVE);
+		kobject_del(&up->kobj);
+		kobject_put(&up->kobj);
+	}
+
+	sched_destroy_user(up);
+	key_put(up->uid_keyring);
+	key_put(up->session_keyring);
+	kmem_cache_free(uid_cachep, up);
+
+done:
+	uids_mutex_unlock();
 }
 
-static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static void free_user(struct user_struct *up, unsigned long flags)
 {
-	struct list_head *up;
+	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
+	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+}
 
-	list_for_each(up, hashent) {
-		struct user_struct *user;
+#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */
 
-		user = list_entry(up, struct user_struct, uidhash_list);
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+	struct user_struct *user;
+	struct hlist_node *h;
 
-		if(user->uid == uid) {
+	hlist_for_each_entry(user, h, hashent, uidhash_node) {
+		if (user->uid == uid) {
 			atomic_inc(&user->__count);
 			return user;
 		}
@@ -80,6 +352,45 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *has
 	return NULL;
 }
 
+int uids_sysfs_init(void) { return 0; }
+static inline int uids_user_create(struct user_struct *up) { return 0; }
+static inline void uids_mutex_lock(void) { }
+static inline void uids_mutex_unlock(void) { }
+
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static void free_user(struct user_struct *up, unsigned long flags)
+{
+	uid_hash_remove(up);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+	sched_destroy_user(up);
+	key_put(up->uid_keyring);
+	key_put(up->session_keyring);
+	kmem_cache_free(uid_cachep, up);
+}
+
+#endif
+
+#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
+/*
+ * We need to check if a setuid can take place. This function should be called
+ * before successfully completing the setuid.
+ */
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+
+	return sched_rt_can_attach(up->tg, tsk);
+
+}
+#else
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+	return 1;
+}
+#endif
+
 /*
  * Locate the user_struct for the passed UID. If found, take a ref on it. The
  * caller must undo that ref with free_uid().
@@ -89,66 +400,71 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *has
 struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
+	unsigned long flags;
+	struct user_namespace *ns = current_user_ns();
 
-	spin_lock_bh(&uidhash_lock);
-	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock_bh(&uidhash_lock);
+	spin_lock_irqsave(&uidhash_lock, flags);
+	ret = uid_hash_find(uid, uidhashentry(ns, uid));
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
-	local_bh_disable();
-	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
-		uid_hash_remove(up);
-		key_put(up->uid_keyring);
-		key_put(up->session_keyring);
-		kmem_cache_free(uid_cachep, up);
-		spin_unlock(&uidhash_lock);
-	}
-	local_bh_enable();
+	unsigned long flags;
+
+	if (!up)
+		return;
+
+	local_irq_save(flags);
+	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+		free_user(up, flags);
+	else
+		local_irq_restore(flags);
 }
 
-struct user_struct * alloc_uid(uid_t uid)
+struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 {
-	struct list_head *hashent = uidhashentry(uid);
-	struct user_struct *up;
+	struct hlist_head *hashent = uidhashentry(ns, uid);
+	struct user_struct *up, *new;
 
-	spin_lock_bh(&uidhash_lock);
+	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
+	 * atomic.
+	 */
+	uids_mutex_lock();
+
+	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock_bh(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
-		struct user_struct *new;
-
-		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
+		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
 		if (!new)
-			return NULL;
+			goto out_unlock;
+
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
-		atomic_set(&new->processes, 0);
-		atomic_set(&new->files, 0);
-		atomic_set(&new->sigpending, 0);
-#ifdef CONFIG_INOTIFY
-		atomic_set(&new->inotify_watches, 0);
-		atomic_set(&new->inotify_devs, 0);
-#endif
-		new->mq_bytes = 0;
-		new->locked_shm = 0;
+		if (sched_create_user(new) < 0)
+			goto out_free_user;
 
-		if (alloc_uid_keyring(new) < 0) {
-			kmem_cache_free(uid_cachep, new);
-			return NULL;
-		}
+		new->user_ns = get_user_ns(ns);
+
+		if (uids_user_create(new))
+			goto out_destroy_sched;
 
 		/*
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock_bh(&uidhash_lock);
+		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
+			/* This case is not possible when CONFIG_USER_SCHED
+			 * is defined, since we serialize alloc_uid() using
+			 * uids_mutex. Hence no need to call
+			 * sched_destroy_user() or remove_user_sysfs_dir().
+			 */
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
@@ -156,45 +472,37 @@ struct user_struct * alloc_uid(uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock_bh(&uidhash_lock);
-
+		spin_unlock_irq(&uidhash_lock);
 	}
-	return up;
-}
 
-void switch_uid(struct user_struct *new_user)
-{
-	struct user_struct *old_user;
+	uids_mutex_unlock();
 
-	/* What if a process setreuid()'s and this brings the
-	 * new uid over his NPROC rlimit? We can check this now
-	 * cheaply with the new uid cache, so if it matters
-	 * we should be checking for it. -DaveM
-	 */
-	old_user = current->user;
-	atomic_inc(&new_user->processes);
-	atomic_dec(&old_user->processes);
-	switch_uid_keyring(new_user);
-	current->user = new_user;
-	free_uid(old_user);
-	suid_keys(current);
-}
+	return up;
+out_destroy_sched:
+	sched_destroy_user(new);
+	put_user_ns(new->user_ns);
+out_free_user:
+	kmem_cache_free(uid_cachep, new);
+out_unlock:
+	uids_mutex_unlock();
+	return NULL;
+}
 
 static int __init uid_cache_init(void)
 {
 	int n;
 
 	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
-			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	for(n = 0; n < UIDHASH_SZ; ++n)
-		INIT_LIST_HEAD(uidhash_table + n);
+		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock_bh(&uidhash_lock);
-	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock_bh(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
+	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
+	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
 }
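
The sketch below is not part of the patch. It is a minimal, hypothetical caller (report_uid_processes() is invented for illustration) showing the reference-counting contract that the reworked find_user()/free_uid() above rely on: a successful find_user() takes a reference on the user_struct under uidhash_lock, and the caller must drop it with free_uid().

/* Hypothetical example, not in the patch: pairing find_user() with free_uid(). */
static void report_uid_processes(uid_t uid)
{
	struct user_struct *up;

	up = find_user(uid);	/* takes a reference, or returns NULL */
	if (!up)
		return;

	printk(KERN_INFO "uid %u has %d processes\n",
	       uid, atomic_read(&up->processes));

	/* Drop the reference. Under CONFIG_USER_SCHED && CONFIG_SYSFS the
	 * final put defers the actual teardown to cleanup_user_struct(). */
	free_uid(up);
}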