* Heavily rewritten.
*/
-#include <linux/config.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
+#include <linux/capability.h>
#include <linux/module.h>
+#include <linux/sysfs.h>
#include <linux/seq_file.h>
-#include <linux/namespace.h>
+#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
+#include <linux/ramfs.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
-
-extern int __init init_rootfs(void);
-
-#ifdef CONFIG_SYSFS
-extern int __init sysfs_init(void);
-#else
-static inline int sysfs_init(void)
-{
- return 0;
-}
-#endif
+#include "internal.h"
/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
static int event;
-static struct list_head *mount_hashtable;
+static struct list_head *mount_hashtable __read_mostly;
static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache;
+static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;
+/* /sys/fs */
+decl_subsys(fs, NULL, NULL);
+EXPORT_SYMBOL_GPL(fs_subsys);
+
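+/* Hash a (mount, mountpoint dentry) pair to a mount_hashtable bucket. */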
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
struct vfsmount *alloc_vfsmnt(const char *name)
{
- struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
+ struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
if (mnt) {
- memset(mnt, 0, sizeof(struct vfsmount));
atomic_set(&mnt->mnt_count, 1);
INIT_LIST_HEAD(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
return mnt;
}
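+
+/*
+ * simple_set_mnt - point a vfsmount at a superblock, taking a reference on
+ * the superblock's root dentry; typically called from ->get_sb().
+ */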
+int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
+{
+ mnt->mnt_sb = sb;
+ mnt->mnt_root = dget(sb->s_root);
+ return 0;
+}
+
+EXPORT_SYMBOL(simple_set_mnt);
+
void free_vfsmnt(struct vfsmount *mnt)
{
kfree(mnt->mnt_devname);
static inline int check_mnt(struct vfsmount *mnt)
{
- return mnt->mnt_namespace == current->namespace;
+ return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
-static void touch_namespace(struct namespace *ns)
+static void touch_mnt_namespace(struct mnt_namespace *ns)
{
if (ns) {
ns->event = ++event;
}
}
-static void __touch_namespace(struct namespace *ns)
+static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
if (ns && ns->event != event) {
ns->event = event;
struct vfsmount *parent = mnt->mnt_parent;
struct vfsmount *m;
LIST_HEAD(head);
- struct namespace *n = parent->mnt_namespace;
+ struct mnt_namespace *n = parent->mnt_ns;
BUG_ON(parent == mnt);
list_add_tail(&head, &mnt->mnt_list);
list_for_each_entry(m, &head, mnt_list)
- m->mnt_namespace = n;
+ m->mnt_ns = n;
list_splice(&head, n->list.prev);
list_add_tail(&mnt->mnt_hash, mount_hashtable +
hash(parent, mnt->mnt_mountpoint));
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
- touch_namespace(n);
+ touch_mnt_namespace(n);
}
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
list_add(&mnt->mnt_slave, &old->mnt_slave_list);
mnt->mnt_master = old;
CLEAR_MNT_SHARED(mnt);
- } else {
+ } else if (!(flag & CL_PRIVATE)) {
if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
list_add(&mnt->mnt_share, &old->mnt_share);
if (IS_MNT_SLAVE(old))
/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
- struct namespace *n = m->private;
- struct list_head *p;
- loff_t l = *pos;
+ struct mnt_namespace *n = m->private;
down_read(&namespace_sem);
- list_for_each(p, &n->list)
- if (!l--)
- return list_entry(p, struct vfsmount, mnt_list);
- return NULL;
+ return seq_list_start(&n->list, *pos);
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct namespace *n = m->private;
- struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
- (*pos)++;
- return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
+ struct mnt_namespace *n = m->private;
+
+ return seq_list_next(v, &n->list, pos);
}
static void m_stop(struct seq_file *m, void *v)
static int show_vfsmnt(struct seq_file *m, void *v)
{
- struct vfsmount *mnt = v;
+ struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
int err = 0;
static struct proc_fs_info {
int flag;
{ MS_SYNCHRONOUS, ",sync" },
{ MS_DIRSYNC, ",dirsync" },
{ MS_MANDLOCK, ",mand" },
- { MS_NOATIME, ",noatime" },
- { MS_NODIRATIME, ",nodiratime" },
{ 0, NULL }
};
static struct proc_fs_info mnt_info[] = {
{ MNT_NOSUID, ",nosuid" },
{ MNT_NODEV, ",nodev" },
{ MNT_NOEXEC, ",noexec" },
+ { MNT_NOATIME, ",noatime" },
+ { MNT_NODIRATIME, ",nodiratime" },
+ { MNT_RELATIME, ",relatime" },
{ 0, NULL }
};
struct proc_fs_info *fs_infop;
seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
seq_putc(m, ' ');
mangle(m, mnt->mnt_sb->s_type->name);
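+ /* append ".<subtype>" after the fs type when the superblock declares one */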
+ if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
+ seq_putc(m, '.');
+ mangle(m, mnt->mnt_sb->s_subtype);
+ }
seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
if (mnt->mnt_sb->s_flags & fs_infop->flag)
.show = show_vfsmnt
};
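+/*
+ * Emit one mountstats record (/proc/<pid>/mountstats): device, mount point,
+ * fstype, plus any per-fs statistics the superblock exposes via ->show_stats().
+ */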
+static int show_vfsstat(struct seq_file *m, void *v)
+{
+ struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
+ int err = 0;
+
+ /* device */
+ if (mnt->mnt_devname) {
+ seq_puts(m, "device ");
+ mangle(m, mnt->mnt_devname);
+ } else
+ seq_puts(m, "no device");
+
+ /* mount point */
+ seq_puts(m, " mounted on ");
+ seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
+ seq_putc(m, ' ');
+
+ /* file system type */
+ seq_puts(m, "with fstype ");
+ mangle(m, mnt->mnt_sb->s_type->name);
+
+ /* optional statistics */
+ if (mnt->mnt_sb->s_op->show_stats) {
+ seq_putc(m, ' ');
+ err = mnt->mnt_sb->s_op->show_stats(m, mnt);
+ }
+
+ seq_putc(m, '\n');
+ return err;
+}
+
+struct seq_operations mountstats_op = {
+ .start = m_start,
+ .next = m_next,
+ .stop = m_stop,
+ .show = show_vfsstat,
+};
+
/**
* may_umount_tree - check if a mount tree is busy
* @mnt: root of mount tree
spin_unlock(&vfsmount_lock);
if (actual_refs > minimum_refs)
- return -EBUSY;
+ return 0;
- return 0;
+ return 1;
}
EXPORT_SYMBOL(may_umount_tree);
*/
int may_umount(struct vfsmount *mnt)
{
- int ret = 0;
+ int ret = 1;
spin_lock(&vfsmount_lock);
if (propagate_mount_busy(mnt, 2))
- ret = -EBUSY;
+ ret = 0;
spin_unlock(&vfsmount_lock);
return ret;
}
void release_mounts(struct list_head *head)
{
struct vfsmount *mnt;
- while(!list_empty(head)) {
- mnt = list_entry(head->next, struct vfsmount, mnt_hash);
+ while (!list_empty(head)) {
+ mnt = list_first_entry(head, struct vfsmount, mnt_hash);
list_del_init(&mnt->mnt_hash);
if (mnt->mnt_parent != mnt) {
struct dentry *dentry;
{
struct vfsmount *p;
- for (p = mnt; p; p = next_mnt(p, mnt)) {
- list_del(&p->mnt_hash);
- list_add(&p->mnt_hash, kill);
- }
+ for (p = mnt; p; p = next_mnt(p, mnt))
+ list_move(&p->mnt_hash, kill);
if (propagate)
propagate_umount(kill);
list_for_each_entry(p, kill, mnt_hash) {
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
- __touch_namespace(p->mnt_namespace);
- p->mnt_namespace = NULL;
+ __touch_mnt_namespace(p->mnt_ns);
+ p->mnt_ns = NULL;
list_del_init(&p->mnt_child);
if (p->mnt_parent != p)
- mnt->mnt_mountpoint->d_mounted--;
+ p->mnt_mountpoint->d_mounted--;
change_mnt_propagation(p, MS_PRIVATE);
}
}
*/
lock_kernel();
- if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
- sb->s_op->umount_begin(sb);
+ if (sb->s_op->umount_begin)
+ sb->s_op->umount_begin(mnt, flags);
unlock_kernel();
/*
return NULL;
}
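+/*
+ * collect_mounts - take a propagation-private copy of the mount subtree
+ * rooted at dentry (CL_COPY_ALL | CL_PRIVATE); pair with drop_collected_mounts().
+ */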
+struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
+{
+ struct vfsmount *tree;
+ down_read(&namespace_sem);
+ tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
+ up_read(&namespace_sem);
+ return tree;
+}
+
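+/* Unmount and free a tree previously returned by collect_mounts(). */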
+void drop_collected_mounts(struct vfsmount *mnt)
+{
+ LIST_HEAD(umount_list);
+ down_read(&namespace_sem);
+ spin_lock(&vfsmount_lock);
+ umount_tree(mnt, 0, &umount_list);
+ spin_unlock(&vfsmount_lock);
+ up_read(&namespace_sem);
+ release_mounts(&umount_list);
+}
+
/*
* @source_mnt : mount tree to be attached
* @nd : place the mount tree @source_mnt is attached
if (parent_nd) {
detach_mnt(source_mnt, parent_nd);
attach_mnt(source_mnt, nd);
- touch_namespace(current->namespace);
+ touch_mnt_namespace(current->nsproxy->mnt_ns);
} else {
mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
commit_tree(source_mnt);
return -ENOTDIR;
err = -ENOENT;
- down(&nd->dentry->d_inode->i_sem);
+ mutex_lock(&nd->dentry->d_inode->i_mutex);
if (IS_DEADDIR(nd->dentry->d_inode))
goto out_unlock;
if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
- up(&nd->dentry->d_inode->i_sem);
+ mutex_unlock(&nd->dentry->d_inode->i_mutex);
if (!err)
security_sb_post_addmount(mnt, nd);
return err;
int recurse = flag & MS_REC;
int type = flag & ~MS_REC;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (nd->dentry != nd->mnt->mnt_root)
return -EINVAL;
goto out;
err = -ENOENT;
- down(&nd->dentry->d_inode->i_sem);
+ mutex_lock(&nd->dentry->d_inode->i_mutex);
if (IS_DEADDIR(nd->dentry->d_inode))
goto out1;
list_del_init(&old_nd.mnt->mnt_expire);
spin_unlock(&vfsmount_lock);
out1:
- up(&nd->dentry->d_inode->i_sem);
+ mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
up_write(&namespace_sem);
if (!err)
*/
if (!propagate_mount_busy(mnt, 2)) {
/* delete from the namespace */
- touch_namespace(mnt->mnt_namespace);
+ touch_mnt_namespace(mnt->mnt_ns);
list_del_init(&mnt->mnt_list);
- mnt->mnt_namespace = NULL;
+ mnt->mnt_ns = NULL;
umount_tree(mnt, 1, umounts);
spin_unlock(&vfsmount_lock);
} else {
}
/*
+ * go through the vfsmounts we've just consigned to the graveyard to
+ * - check that they're still dead
+ * - delete the vfsmount from the appropriate namespace under lock
+ * - dispose of the corpse
+ */
+static void expire_mount_list(struct list_head *graveyard, struct list_head *mounts)
+{
+ struct mnt_namespace *ns;
+ struct vfsmount *mnt;
+
+ while (!list_empty(graveyard)) {
+ LIST_HEAD(umounts);
+ mnt = list_first_entry(graveyard, struct vfsmount, mnt_expire);
+ list_del_init(&mnt->mnt_expire);
+
+ /* don't do anything if the namespace is dead - all the
+ * vfsmounts from it are going away anyway */
+ ns = mnt->mnt_ns;
+ if (!ns || !ns->root)
+ continue;
+ get_mnt_ns(ns);
+
+ spin_unlock(&vfsmount_lock);
+ down_write(&namespace_sem);
+ expire_mount(mnt, mounts, &umounts);
+ up_write(&namespace_sem);
+ release_mounts(&umounts);
+ mntput(mnt);
+ put_mnt_ns(ns);
+ spin_lock(&vfsmount_lock);
+ }
+}
+
+/*
* process a list of expirable mountpoints with the intent of discarding any
* mountpoints that aren't in use and haven't been touched since last we came
* here
*/
void mark_mounts_for_expiry(struct list_head *mounts)
{
- struct namespace *namespace;
struct vfsmount *mnt, *next;
LIST_HEAD(graveyard);
list_move(&mnt->mnt_expire, &graveyard);
}
- /*
- * go through the vfsmounts we've just consigned to the graveyard to
- * - check that they're still dead
- * - delete the vfsmount from the appropriate namespace under lock
- * - dispose of the corpse
- */
- while (!list_empty(&graveyard)) {
- LIST_HEAD(umounts);
- mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
- list_del_init(&mnt->mnt_expire);
+ expire_mount_list(&graveyard, mounts);
- /* don't do anything if the namespace is dead - all the
- * vfsmounts from it are going away anyway */
- namespace = mnt->mnt_namespace;
- if (!namespace || !namespace->root)
+ spin_unlock(&vfsmount_lock);
+}
+
+EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
+
+/*
+ * Ripoff of 'select_parent()'
+ *
+ * search the list of submounts for a given mountpoint, and move any
+ * shrinkable submounts to the 'graveyard' list.
+ */
+static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
+{
+ struct vfsmount *this_parent = parent;
+ struct list_head *next;
+ int found = 0;
+
+repeat:
+ next = this_parent->mnt_mounts.next;
+resume:
+ while (next != &this_parent->mnt_mounts) {
+ struct list_head *tmp = next;
+ struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);
+
+ next = tmp->next;
+ if (!(mnt->mnt_flags & MNT_SHRINKABLE))
continue;
- get_namespace(namespace);
+ /*
+ * Descend a level if the mnt_mounts list is non-empty.
+ */
+ if (!list_empty(&mnt->mnt_mounts)) {
+ this_parent = mnt;
+ goto repeat;
+ }
- spin_unlock(&vfsmount_lock);
- down_write(&namespace_sem);
- expire_mount(mnt, mounts, &umounts);
- up_write(&namespace_sem);
- release_mounts(&umounts);
- mntput(mnt);
- put_namespace(namespace);
- spin_lock(&vfsmount_lock);
+ if (!propagate_mount_busy(mnt, 1)) {
+ mntget(mnt);
+ list_move_tail(&mnt->mnt_expire, graveyard);
+ found++;
+ }
}
+ /*
+ * All done at this level ... ascend and resume the search
+ */
+ if (this_parent != parent) {
+ next = this_parent->mnt_child.next;
+ this_parent = this_parent->mnt_parent;
+ goto resume;
+ }
+ return found;
+}
+
+/*
+ * process a list of expirable mountpoints with the intent of discarding any
+ * submounts of a specific parent mountpoint
+ */
+void shrink_submounts(struct vfsmount *mountpoint, struct list_head *mounts)
+{
+ LIST_HEAD(graveyard);
+ int found;
+
+ spin_lock(&vfsmount_lock);
+
+ /* extract submounts of 'mountpoint' from the expiration list */
+ while ((found = select_submounts(mountpoint, &graveyard)) != 0)
+ expire_mount_list(&graveyard, mounts);
spin_unlock(&vfsmount_lock);
}
-EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
+EXPORT_SYMBOL_GPL(shrink_submounts);
/*
* Some copy_from_user() implementations do not return the exact number of
mnt_flags |= MNT_NODEV;
if (flags & MS_NOEXEC)
mnt_flags |= MNT_NOEXEC;
- flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE);
+ if (flags & MS_NOATIME)
+ mnt_flags |= MNT_NOATIME;
+ if (flags & MS_NODIRATIME)
+ mnt_flags |= MNT_NODIRATIME;
+ if (flags & MS_RELATIME)
+ mnt_flags |= MNT_RELATIME;
+
+ flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
+ MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT);
/* ... and get the mountpoint */
retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
return retval;
}
-int copy_namespace(int flags, struct task_struct *tsk)
+/*
+ * Allocate a new namespace structure and populate it with contents
+ * copied from the namespace of the passed in task structure.
+ */
+static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
+ struct fs_struct *fs)
{
- struct namespace *namespace = tsk->namespace;
- struct namespace *new_ns;
+ struct mnt_namespace *new_ns;
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
- struct fs_struct *fs = tsk->fs;
struct vfsmount *p, *q;
- if (!namespace)
- return 0;
-
- get_namespace(namespace);
-
- if (!(flags & CLONE_NEWNS))
- return 0;
-
- if (!capable(CAP_SYS_ADMIN)) {
- put_namespace(namespace);
- return -EPERM;
- }
-
- new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
+ new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
if (!new_ns)
- goto out;
+ return ERR_PTR(-ENOMEM);
atomic_set(&new_ns->count, 1);
INIT_LIST_HEAD(&new_ns->list);
down_write(&namespace_sem);
/* First pass: copy the tree topology */
- new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
+ new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
CL_COPY_ALL | CL_EXPIRE);
if (!new_ns->root) {
up_write(&namespace_sem);
kfree(new_ns);
- goto out;
+ return ERR_PTR(-ENOMEM);
}
spin_lock(&vfsmount_lock);
list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
* as belonging to new namespace. We have already acquired a private
* fs_struct, so tsk->fs->lock is not needed.
*/
- p = namespace->root;
+ p = mnt_ns->root;
q = new_ns->root;
while (p) {
- q->mnt_namespace = new_ns;
+ q->mnt_ns = new_ns;
if (fs) {
if (p == fs->rootmnt) {
rootmnt = p;
fs->altrootmnt = mntget(q);
}
}
- p = next_mnt(p, namespace->root);
+ p = next_mnt(p, mnt_ns->root);
q = next_mnt(q, new_ns->root);
}
up_write(&namespace_sem);
- tsk->namespace = new_ns;
-
if (rootmnt)
mntput(rootmnt);
if (pwdmnt)
if (altrootmnt)
mntput(altrootmnt);
- put_namespace(namespace);
- return 0;
+ return new_ns;
+}
-out:
- put_namespace(namespace);
- return -ENOMEM;
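+/*
+ * copy_mnt_ns - return the passed-in mount namespace with an extra
+ * reference, or a full copy of it when CLONE_NEWNS is requested.
+ */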
+struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+ struct fs_struct *new_fs)
+{
+ struct mnt_namespace *new_ns;
+
+ BUG_ON(!ns);
+ get_mnt_ns(ns);
+
+ if (!(flags & CLONE_NEWNS))
+ return ns;
+
+ new_ns = dup_mnt_ns(ns, new_fs);
+
+ put_mnt_ns(ns);
+ return new_ns;
}
asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
user_nd.dentry = dget(current->fs->root);
read_unlock(&current->fs->lock);
down_write(&namespace_sem);
- down(&old_nd.dentry->d_inode->i_sem);
+ mutex_lock(&old_nd.dentry->d_inode->i_mutex);
error = -EINVAL;
if (IS_MNT_SHARED(old_nd.mnt) ||
IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
detach_mnt(user_nd.mnt, &root_parent);
attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */
attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
- touch_namespace(current->namespace);
+ touch_mnt_namespace(current->nsproxy->mnt_ns);
spin_unlock(&vfsmount_lock);
chroot_fs_refs(&user_nd, &new_nd);
security_sb_post_pivotroot(&user_nd, &new_nd);
path_release(&root_parent);
path_release(&parent_nd);
out2:
- up(&old_nd.dentry->d_inode->i_sem);
+ mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
up_write(&namespace_sem);
path_release(&user_nd);
path_release(&old_nd);
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
- struct namespace *namespace;
- struct task_struct *g, *p;
+ struct mnt_namespace *ns;
mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
if (IS_ERR(mnt))
panic("Can't create rootfs");
- namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
- if (!namespace)
+ ns = kmalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
panic("Can't allocate initial namespace");
- atomic_set(&namespace->count, 1);
- INIT_LIST_HEAD(&namespace->list);
- init_waitqueue_head(&namespace->poll);
- namespace->event = 0;
- list_add(&mnt->mnt_list, &namespace->list);
- namespace->root = mnt;
- mnt->mnt_namespace = namespace;
-
- init_task.namespace = namespace;
- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
- get_namespace(namespace);
- p->namespace = namespace;
- } while_each_thread(g, p);
- read_unlock(&tasklist_lock);
-
- set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
- set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
+ atomic_set(&ns->count, 1);
+ INIT_LIST_HEAD(&ns->list);
+ init_waitqueue_head(&ns->poll);
+ ns->event = 0;
+ list_add(&mnt->mnt_list, &ns->list);
+ ns->root = mnt;
+ mnt->mnt_ns = ns;
+
+ init_task.nsproxy->mnt_ns = ns;
+ get_mnt_ns(ns);
+
+ set_fs_pwd(current->fs, ns->root, ns->root->mnt_root);
+ set_fs_root(current->fs, ns->root, ns->root->mnt_root);
}
-void __init mnt_init(unsigned long mempages)
+void __init mnt_init(void)
{
struct list_head *d;
unsigned int nr_hash;
int i;
+ int err;
init_rwsem(&namespace_sem);
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
d++;
i--;
} while (i);
- sysfs_init();
+ err = sysfs_init();
+ if (err)
+ printk(KERN_WARNING "%s: sysfs_init error: %d\n",
+ __FUNCTION__, err);
+ err = subsystem_register(&fs_subsys);
+ if (err)
+ printk(KERN_WARNING "%s: subsystem_register error: %d\n",
+ __FUNCTION__, err);
init_rootfs();
init_mount_tree();
}
-void __put_namespace(struct namespace *namespace)
+void __put_mnt_ns(struct mnt_namespace *ns)
{
- struct vfsmount *root = namespace->root;
+ struct vfsmount *root = ns->root;
LIST_HEAD(umount_list);
- namespace->root = NULL;
+ ns->root = NULL;
spin_unlock(&vfsmount_lock);
down_write(&namespace_sem);
spin_lock(&vfsmount_lock);
spin_unlock(&vfsmount_lock);
up_write(&namespace_sem);
release_mounts(&umount_list);
- kfree(namespace);
+ kfree(ns);
}