X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fuser.c;h=8e1c8c0a496c14e7b154ed2d53a10ef6eb3b34b2;hb=8c7b09f43f4bf570654bcc458ce96819a932303c;hp=9ca2848fc35676fa8ebac52f20c33f7f056d09ff;hpb=28f300d23674fa01ae747c66ce861d4ee6aebe8c;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/user.c b/kernel/user.c
index 9ca2848..8e1c8c0 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -17,6 +17,14 @@
 #include
 #include
 
+struct user_namespace init_user_ns = {
+	.kref = {
+		.refcount	= ATOMIC_INIT(2),
+	},
+	.creator = &root_user,
+};
+EXPORT_SYMBOL_GPL(init_user_ns);
+
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
  * when changing user ID's (ie setuid() and friends).
@@ -39,39 +47,37 @@ static struct kmem_cache *uid_cachep;
  */
 static DEFINE_SPINLOCK(uidhash_lock);
 
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
 struct user_struct root_user = {
-	.__count	= ATOMIC_INIT(1),
+	.__count	= ATOMIC_INIT(2),
 	.processes	= ATOMIC_INIT(1),
 	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
-	.mq_bytes	= 0,
 	.locked_shm	= 0,
-#ifdef CONFIG_KEYS
-	.uid_keyring	= &root_user_keyring,
-	.session_keyring = &root_session_keyring,
-#endif
+	.user_ns	= &init_user_ns,
 };
 
 /*
  * These routines must be called with the uidhash spinlock held!
  */
-static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
+static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
 {
 	hlist_add_head(&up->uidhash_node, hashent);
 }
 
-static inline void uid_hash_remove(struct user_struct *up)
+static void uid_hash_remove(struct user_struct *up)
 {
 	hlist_del_init(&up->uidhash_node);
+	put_user_ns(up->user_ns);
 }
 
-static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 {
 	struct user_struct *user;
 	struct hlist_node *h;
 
 	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if(user->uid == uid) {
+		if (user->uid == uid) {
 			atomic_inc(&user->__count);
 			return user;
 		}
@@ -80,6 +86,19 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *ha
 	return NULL;
 }
 
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static void free_user(struct user_struct *up, unsigned long flags)
+{
+	uid_hash_remove(up);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+	key_put(up->uid_keyring);
+	key_put(up->session_keyring);
+	kmem_cache_free(uid_cachep, up);
+}
+
 /*
  * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
  * caller must undo that ref with free_uid().
@@ -90,7 +109,7 @@ struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
 	unsigned long flags;
-	struct user_namespace *ns = current->nsproxy->user_ns;
+	struct user_namespace *ns = current_user_ns();
 
 	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(uid, uidhashentry(ns, uid));
@@ -106,49 +125,31 @@ void free_uid(struct user_struct *up)
 		return;
 
 	local_irq_save(flags);
-	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
-		uid_hash_remove(up);
-		spin_unlock_irqrestore(&uidhash_lock, flags);
-		key_put(up->uid_keyring);
-		key_put(up->session_keyring);
-		kmem_cache_free(uid_cachep, up);
-	} else {
+	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+		free_user(up, flags);
+	else
 		local_irq_restore(flags);
-	}
 }
 
-struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
+struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 {
 	struct hlist_head *hashent = uidhashentry(ns, uid);
-	struct user_struct *up;
+	struct user_struct *up, *new;
 
+	/* Make uid_hash_find() + uid_hash_insert() atomic. */
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
-		struct user_struct *new;
-
-		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
+		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
 		if (!new)
-			return NULL;
+			goto out_unlock;
+
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
-		atomic_set(&new->processes, 0);
-		atomic_set(&new->files, 0);
-		atomic_set(&new->sigpending, 0);
-#ifdef CONFIG_INOTIFY_USER
-		atomic_set(&new->inotify_watches, 0);
-		atomic_set(&new->inotify_devs, 0);
-#endif
-
-		new->mq_bytes = 0;
-		new->locked_shm = 0;
-
-		if (alloc_uid_keyring(new, current) < 0) {
-			kmem_cache_free(uid_cachep, new);
-			return NULL;
-		}
+
+		new->user_ns = get_user_ns(ns);
 
 		/*
 		 * Before adding this, check whether we raced
@@ -165,63 +166,12 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 			up = new;
 		}
 		spin_unlock_irq(&uidhash_lock);
-
 	}
-	return up;
-}
 
-void switch_uid(struct user_struct *new_user)
-{
-	struct user_struct *old_user;
-
-	/* What if a process setreuid()'s and this brings the
-	 * new uid over his NPROC rlimit?  We can check this now
-	 * cheaply with the new uid cache, so if it matters
-	 * we should be checking for it.  -DaveM
-	 */
-	old_user = current->user;
-	atomic_inc(&new_user->processes);
-	atomic_dec(&old_user->processes);
-	switch_uid_keyring(new_user);
-	current->user = new_user;
-
-	/*
-	 * We need to synchronize with __sigqueue_alloc()
-	 * doing a get_uid(p->user).. If that saw the old
-	 * user value, we need to wait until it has exited
-	 * its critical region before we can free the old
-	 * structure.
-	 */
-	smp_mb();
-	spin_unlock_wait(&current->sighand->siglock);
-
-	free_uid(old_user);
-	suid_keys(current);
-}
-
-void release_uids(struct user_namespace *ns)
-{
-	int i;
-	unsigned long flags;
-	struct hlist_head *head;
-	struct hlist_node *nd;
-
-	spin_lock_irqsave(&uidhash_lock, flags);
-	/*
-	 * collapse the chains so that the user_struct-s will
-	 * be still alive, but not in hashes. subsequent free_uid()
-	 * will free them.
-	 */
-	for (i = 0; i < UIDHASH_SZ; i++) {
-		head = ns->uidhash_table + i;
-		while (!hlist_empty(head)) {
-			nd = head->first;
-			hlist_del_init(nd);
-		}
-	}
-	spin_unlock_irqrestore(&uidhash_lock, flags);
+	return up;
 
-	free_uid(ns->root_user);
+out_unlock:
+	return NULL;
 }
 
 static int __init uid_cache_init(void)
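
Note: the free_uid()/free_user() split above is built around the atomic_dec_and_lock()
idiom. A plain atomic_dec_and_test() would be racy here: between the count reaching
zero and the entry being unhashed, a concurrent uid_hash_find() running under
uidhash_lock could take a fresh reference to the dying user_struct. atomic_dec_and_lock()
only takes uidhash_lock when the count may hit zero, so the final put and the unhash
happen atomically with respect to lookups. The standalone userspace sketch below
illustrates the same pattern with C11 atomics and a pthread mutex standing in for the
kernel's atomic_t and spinlock; the obj, cache_lock and dec_and_lock names are
illustrative stand-ins, not kernel API.

/* Userspace sketch of the atomic_dec_and_lock() pattern used by free_uid().
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	atomic_int refcount;		/* stands in for user_struct.__count */
	int uid;
};

/* Like the kernel's atomic_dec_and_lock(): returns 1 with the lock held iff
 * the count dropped to zero; otherwise decrements and returns 0, lock not held.
 */
static int dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: not the last reference, decrement locklessly. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return 0;
	}

	/* Slow path: we may be the last holder.  Take the lock first so a
	 * concurrent lookup cannot grab a reference to a dying object. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) == 1)
		return 1;		/* caller unlocks after teardown */
	pthread_mutex_unlock(lock);
	return 0;
}

static void put_obj(struct obj *o)
{
	if (dec_and_lock(&o->refcount, &cache_lock)) {
		/* Mirrors free_user(): unhashing would go here, then the
		 * lock is dropped before the memory is released. */
		pthread_mutex_unlock(&cache_lock);
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcount, 2);
	o->uid = 1000;
	put_obj(o);	/* 2 -> 1: fast path, no lock taken */
	put_obj(o);	/* 1 -> 0: locked teardown */
	puts("done");
	return 0;
}

The fast path never touches the lock, which is why free_uid() stays cheap for
every put except the last one; only the final reference holder pays for the
lock and the unhash.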