#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
+#include "cred-internals.h"
+
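+/* Statically allocated default namespace, never freed. The initial
+ * refcount of 2 presumably covers one base reference (so the count
+ * stays positive for the lifetime of the system) plus the reference
+ * taken for root_user->user_ns below.
+ */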
+struct user_namespace init_user_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+ .creator = &root_user,
+};
+EXPORT_SYMBOL_GPL(init_user_ns);
/*
* UID task count cache, to get fast user lookup in "alloc_uid"
*/
static DEFINE_SPINLOCK(uidhash_lock);
+/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
- .__count = ATOMIC_INIT(1),
+ .__count = ATOMIC_INIT(2),
.processes = ATOMIC_INIT(1),
.files = ATOMIC_INIT(0),
.sigpending = ATOMIC_INIT(0),
- .mq_bytes = 0,
.locked_shm = 0,
-#ifdef CONFIG_KEYS
- .uid_keyring = &root_user_keyring,
- .session_keyring = &root_session_keyring,
-#endif
+ .user_ns = &init_user_ns,
};
/*
* These routines must be called with the uidhash spinlock held!
*/
-static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
+static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
hlist_add_head(&up->uidhash_node, hashent);
}
-static inline void uid_hash_remove(struct user_struct *up)
+static void uid_hash_remove(struct user_struct *up)
{
hlist_del_init(&up->uidhash_node);
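+ /* drop the namespace reference taken by alloc_uid() via get_user_ns() */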
+ put_user_ns(up->user_ns);
}
-static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
struct hlist_node *h;
hlist_for_each_entry(user, h, hashent, uidhash_node) {
- if(user->uid == uid) {
+ if (user->uid == uid) {
atomic_inc(&user->__count);
return user;
}
}

return NULL;
}
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static void free_user(struct user_struct *up, unsigned long flags)
+{
+ uid_hash_remove(up);
+ spin_unlock_irqrestore(&uidhash_lock, flags);
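+ /* unhashed, so no new reference can be taken; the keyrings and the
+ * struct itself can be released without holding the lock
+ */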
+ key_put(up->uid_keyring);
+ key_put(up->session_keyring);
+ kmem_cache_free(uid_cachep, up);
+}
+
/*
* Locate the user_struct for the passed UID. If found, take a ref on it. The
* caller must undo that ref with free_uid().
*/
struct user_struct *find_user(uid_t uid)
{
struct user_struct *ret;
unsigned long flags;
- struct user_namespace *ns = current->nsproxy->user_ns;
+ struct user_namespace *ns = current_user_ns();
spin_lock_irqsave(&uidhash_lock, flags);
ret = uid_hash_find(uid, uidhashentry(ns, uid));
spin_unlock_irqrestore(&uidhash_lock, flags);

return ret;
}

void free_uid(struct user_struct *up)
{
unsigned long flags;

if (!up)
return;

local_irq_save(flags);
- if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
- uid_hash_remove(up);
- spin_unlock_irqrestore(&uidhash_lock, flags);
- key_put(up->uid_keyring);
- key_put(up->session_keyring);
- kmem_cache_free(uid_cachep, up);
- } else {
+ if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+ free_user(up, flags);
+ else
local_irq_restore(flags);
- }
}
-struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
+struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
struct hlist_head *hashent = uidhashentry(ns, uid);
- struct user_struct *up;
+ struct user_struct *up, *new;
+ /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
+ * atomic.
+ */
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
spin_unlock_irq(&uidhash_lock);
if (!up) {
- struct user_struct *new;
-
- new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
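+ /* zeroed allocation: fields not set explicitly below (processes,
+ * files, sigpending, keyrings, mq_bytes, ...) start out zero/NULL
+ */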
+ new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
if (!new)
- return NULL;
+ goto out_unlock;
+
new->uid = uid;
atomic_set(&new->__count, 1);
- atomic_set(&new->processes, 0);
- atomic_set(&new->files, 0);
- atomic_set(&new->sigpending, 0);
-#ifdef CONFIG_INOTIFY_USER
- atomic_set(&new->inotify_watches, 0);
- atomic_set(&new->inotify_devs, 0);
-#endif
-
- new->mq_bytes = 0;
- new->locked_shm = 0;
-
- if (alloc_uid_keyring(new, current) < 0) {
- kmem_cache_free(uid_cachep, new);
- return NULL;
- }
+
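+ /* pin the namespace for the lifetime of this user_struct;
+ * the reference is dropped in uid_hash_remove()
+ */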
+ new->user_ns = get_user_ns(ns);
/*
* Before adding this, check whether we raced
* on adding the same user already..
*/
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
+ /* This case is not possible when CONFIG_USER_SCHED
+ * is defined, since we serialize alloc_uid() using
+ * uids_mutex. Hence no need to call
+ * sched_destroy_user() or remove_user_sysfs_dir().
+ */
key_put(new->uid_keyring);
key_put(new->session_keyring);
+ put_user_ns(new->user_ns); /* also drop the namespace ref taken above */
kmem_cache_free(uid_cachep, new);
} else {
uid_hash_insert(new, hashent);
up = new;
}
spin_unlock_irq(&uidhash_lock);
-
}
- return up;
-}
-void switch_uid(struct user_struct *new_user)
-{
- struct user_struct *old_user;
-
- /* What if a process setreuid()'s and this brings the
- * new uid over his NPROC rlimit? We can check this now
- * cheaply with the new uid cache, so if it matters
- * we should be checking for it. -DaveM
- */
- old_user = current->user;
- atomic_inc(&new_user->processes);
- atomic_dec(&old_user->processes);
- switch_uid_keyring(new_user);
- current->user = new_user;
-
- /*
- * We need to synchronize with __sigqueue_alloc()
- * doing a get_uid(p->user).. If that saw the old
- * user value, we need to wait until it has exited
- * its critical region before we can free the old
- * structure.
- */
- smp_mb();
- spin_unlock_wait(&current->sighand->siglock);
-
- free_uid(old_user);
- suid_keys(current);
-}
-
-void release_uids(struct user_namespace *ns)
-{
- int i;
- unsigned long flags;
- struct hlist_head *head;
- struct hlist_node *nd;
-
- spin_lock_irqsave(&uidhash_lock, flags);
- /*
- * collapse the chains so that the user_struct-s will
- * be still alive, but not in hashes. subsequent free_uid()
- * will free them.
- */
- for (i = 0; i < UIDHASH_SZ; i++) {
- head = ns->uidhash_table + i;
- while (!hlist_empty(head)) {
- nd = head->first;
- hlist_del_init(nd);
- }
- }
- spin_unlock_irqrestore(&uidhash_lock, flags);
- free_uid(ns->root_user);
+ return up;
+
+out_free_user:
+ put_user_ns(new->user_ns);
+ kmem_cache_free(uid_cachep, new);
+out_unlock:
+ return NULL;
}
static int __init uid_cache_init(void)