[PATCH] x86: Revert new unwind kernel stack termination
diff --git a/fs/dcache.c b/fs/dcache.c
index 86bdb93..2bac4ba 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -14,7 +14,6 @@
  * the dcache entry is deleted or garbage collected.
  */
 
-#include <linux/config.h>
 #include <linux/syscalls.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/seqlock.h>
 #include <linux/swap.h>
 #include <linux/bootmem.h>
+#include "internal.h"
 
-/* #define DCACHE_DEBUG 1 */
 
-int sysctl_vfs_cache_pressure = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
-static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(dcache_lock);
 
-static kmem_cache_t *dentry_cache;
+static kmem_cache_t *dentry_cache __read_mostly;
 
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
 
@@ -59,9 +58,9 @@ static kmem_cache_t *dentry_cache;
 #define D_HASHBITS     d_hash_shift
 #define D_HASHMASK     d_hash_mask
 
-static unsigned int d_hash_mask;
-static unsigned int d_hash_shift;
-static struct hlist_head *dentry_hashtable;
+static unsigned int d_hash_mask __read_mostly;
+static unsigned int d_hash_shift __read_mostly;
+static struct hlist_head *dentry_hashtable __read_mostly;
 static LIST_HEAD(dentry_unused);
 
 /* Statistics gathering. */
@@ -292,9 +291,9 @@ struct dentry * dget_locked(struct dentry *dentry)
  * it can be unhashed only if it has no children, or if it is the root
  * of a filesystem.
  *
- * If the inode has a DCACHE_DISCONNECTED alias, then prefer
+ * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
  * any other hashed alias over that one unless @want_discon is set,
- * in which case only return a DCACHE_DISCONNECTED alias.
+ * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
  */
 
 static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
@@ -310,7 +309,8 @@ static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
                prefetch(next);
                alias = list_entry(tmp, struct dentry, d_alias);
                if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
-                       if (alias->d_flags & DCACHE_DISCONNECTED)
+                       if (IS_ROOT(alias) &&
+                           (alias->d_flags & DCACHE_DISCONNECTED))
                                discon_alias = alias;
                        else if (!want_discon) {
                                __dget_locked(alias);
@@ -325,10 +325,13 @@ static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
 
 struct dentry * d_find_alias(struct inode *inode)
 {
-       struct dentry *de;
-       spin_lock(&dcache_lock);
-       de = __d_find_alias(inode, 0);
-       spin_unlock(&dcache_lock);
+       struct dentry *de = NULL;
+
+       if (!list_empty(&inode->i_dentry)) {
+               spin_lock(&dcache_lock);
+               de = __d_find_alias(inode, 0);
+               spin_unlock(&dcache_lock);
+       }
        return de;
 }
 
@@ -357,12 +360,13 @@ restart:
 }
 
 /*
- * Throw away a dentry - free the inode, dput the parent.
- * This requires that the LRU list has already been
- * removed.
+ * Throw away a dentry - free the inode, dput the parent.  This requires that
+ * the LRU list has already been removed.
+ *
  * Called with dcache_lock, drops it and then regains.
+ * Called with dentry->d_lock held, drops it.
  */
-static inline void prune_one_dentry(struct dentry * dentry)
+static void prune_one_dentry(struct dentry * dentry)
 {
        struct dentry * parent;
 
@@ -380,6 +384,8 @@ static inline void prune_one_dentry(struct dentry * dentry)
 /**
  * prune_dcache - shrink the dcache
  * @count: number of entries to try and free
+ * @sb: if given, ignore dentries for other superblocks
+ *         which are being unmounted.
  *
  * Shrink the dcache. This is done when we need
  * more memory, or simply when we need to unmount
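 
 (In this patch the two remaining callers use the new parameter differently: memory-pressure reclaim calls prune_dcache(nr, NULL) and may prune dentries from any superblock it can safely pin, while shrink_dcache_parent() passes parent->d_sb so that dentries of the filesystem being shrunk are preferred and, per the comment added further down, can be pruned without taking s_umount, which that caller is assumed to already hold.)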
@@ -390,16 +396,29 @@ static inline void prune_one_dentry(struct dentry * dentry)
  * all the dentries are in use.
  */
  
-static void prune_dcache(int count)
+static void prune_dcache(int count, struct super_block *sb)
 {
        spin_lock(&dcache_lock);
        for (; count ; count--) {
                struct dentry *dentry;
                struct list_head *tmp;
+               struct rw_semaphore *s_umount;
 
                cond_resched_lock(&dcache_lock);
 
                tmp = dentry_unused.prev;
+               if (sb) {
+                       /* Try to find a dentry for this sb, but don't try
+                        * too hard; if they aren't near the tail they will
+                        * be moved down again soon
+                        */
+                       int skip = count;
+                       while (skip && tmp != &dentry_unused &&
+                           list_entry(tmp, struct dentry, d_lru)->d_sb != sb) {
+                               skip--;
+                               tmp = tmp->prev;
+                       }
+               }
                if (tmp == &dentry_unused)
                        break;
                list_del_init(tmp);
@@ -425,7 +444,45 @@ static void prune_dcache(int count)
                        spin_unlock(&dentry->d_lock);
                        continue;
                }
-               prune_one_dentry(dentry);
+               /*
+                * If the dentry is not DCACHE_REFERENCED, it is time
+                * to remove it from the dcache, provided the super block is
+                * NULL (which means we are trying to reclaim memory)
+                * or this dentry belongs to the same super block that
+                * we want to shrink.
+                */
+               /*
+                * If this dentry is for "my" filesystem, then I can prune it
+                * without taking the s_umount lock (I already hold it).
+                */
+               if (sb && dentry->d_sb == sb) {
+                       prune_one_dentry(dentry);
+                       continue;
+               }
+               /*
+                * ...otherwise we need to be sure this filesystem isn't being
+                * unmounted, otherwise we could race with
+                * generic_shutdown_super(), and end up holding a reference to
+                * an inode while the filesystem is unmounted.
+                * So we try to get s_umount, and make sure s_root isn't NULL.
+                * (Take a local copy of s_umount to avoid a use-after-free of
+                * `dentry').
+                */
+               s_umount = &dentry->d_sb->s_umount;
+               if (down_read_trylock(s_umount)) {
+                       if (dentry->d_sb->s_root != NULL) {
+                               prune_one_dentry(dentry);
+                               up_read(s_umount);
+                               continue;
+                       }
+                       up_read(s_umount);
+               }
+               spin_unlock(&dentry->d_lock);
+               /* Cannot remove the first dentry, and it isn't appropriate
+                * to move it to the head of the list, so give up, and try
+                * later
+                */
+               break;
        }
        spin_unlock(&dcache_lock);
 }
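
The "local copy of s_umount" comment above is the subtle part: prune_one_dentry() may free the dentry, after which dentry->d_sb must not be dereferenced again, so the semaphore is reached through a pointer saved beforehand. Below is a minimal, compilable userspace sketch of the same idiom, using pthreads instead of kernel rwsems; struct obj, try_prune() and destroy_if_idle() are illustrative names only, not anything from the patch.

#include <pthread.h>
#include <stdlib.h>

struct obj {
        pthread_rwlock_t *teardown_lock;   /* analogous to sb->s_umount */
        int busy;
};

/* may free @o, just as prune_one_dentry() may free the dentry */
static void destroy_if_idle(struct obj *o)
{
        if (!o->busy)
                free(o);
}

static void try_prune(struct obj *o)
{
        /* save the lock pointer before @o can go away */
        pthread_rwlock_t *lock = o->teardown_lock;

        if (pthread_rwlock_tryrdlock(lock) == 0) {
                destroy_if_idle(o);              /* @o may be freed here */
                pthread_rwlock_unlock(lock);     /* safe: uses the saved pointer */
        }
}

int main(void)
{
        static pthread_rwlock_t teardown = PTHREAD_RWLOCK_INITIALIZER;
        struct obj *o = malloc(sizeof(*o));

        o->teardown_lock = &teardown;
        o->busy = 0;
        try_prune(o);
        return 0;
}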
@@ -466,8 +523,7 @@ void shrink_dcache_sb(struct super_block * sb)
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
-               list_del(tmp);
-               list_add(tmp, &dentry_unused);
+               list_move(tmp, &dentry_unused);
        }
 
        /*
@@ -486,12 +542,143 @@ repeat:
                        continue;
                }
                prune_one_dentry(dentry);
+               cond_resched_lock(&dcache_lock);
                goto repeat;
        }
        spin_unlock(&dcache_lock);
 }
 
 /*
+ * destroy a single subtree of dentries for unmount
+ * - see the comments on shrink_dcache_for_umount() for a description of the
+ *   locking
+ */
+static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
+{
+       struct dentry *parent;
+
+       BUG_ON(!IS_ROOT(dentry));
+
+       /* detach this root from the system */
+       spin_lock(&dcache_lock);
+       if (!list_empty(&dentry->d_lru)) {
+               dentry_stat.nr_unused--;
+               list_del_init(&dentry->d_lru);
+       }
+       __d_drop(dentry);
+       spin_unlock(&dcache_lock);
+
+       for (;;) {
+               /* descend to the first leaf in the current subtree */
+               while (!list_empty(&dentry->d_subdirs)) {
+                       struct dentry *loop;
+
+                       /* this is a branch with children - detach all of them
+                        * from the system in one go */
+                       spin_lock(&dcache_lock);
+                       list_for_each_entry(loop, &dentry->d_subdirs,
+                                           d_u.d_child) {
+                               if (!list_empty(&loop->d_lru)) {
+                                       dentry_stat.nr_unused--;
+                                       list_del_init(&loop->d_lru);
+                               }
+
+                               __d_drop(loop);
+                               cond_resched_lock(&dcache_lock);
+                       }
+                       spin_unlock(&dcache_lock);
+
+                       /* move to the first child */
+                       dentry = list_entry(dentry->d_subdirs.next,
+                                           struct dentry, d_u.d_child);
+               }
+
+               /* consume the dentries from this leaf up through its parents
+                * until we find one with children or run out altogether */
+               do {
+                       struct inode *inode;
+
+                       if (atomic_read(&dentry->d_count) != 0) {
+                               printk(KERN_ERR
+                                      "BUG: Dentry %p{i=%lx,n=%s}"
+                                      " still in use (%d)"
+                                      " [unmount of %s %s]\n",
+                                      dentry,
+                                      dentry->d_inode ?
+                                      dentry->d_inode->i_ino : 0UL,
+                                      dentry->d_name.name,
+                                      atomic_read(&dentry->d_count),
+                                      dentry->d_sb->s_type->name,
+                                      dentry->d_sb->s_id);
+                               BUG();
+                       }
+
+                       parent = dentry->d_parent;
+                       if (parent == dentry)
+                               parent = NULL;
+                       else
+                               atomic_dec(&parent->d_count);
+
+                       list_del(&dentry->d_u.d_child);
+                       dentry_stat.nr_dentry--;        /* For d_free, below */
+
+                       inode = dentry->d_inode;
+                       if (inode) {
+                               dentry->d_inode = NULL;
+                               list_del_init(&dentry->d_alias);
+                               if (dentry->d_op && dentry->d_op->d_iput)
+                                       dentry->d_op->d_iput(dentry, inode);
+                               else
+                                       iput(inode);
+                       }
+
+                       d_free(dentry);
+
+                       /* finished when we fall off the top of the tree,
+                        * otherwise we ascend to the parent and move to the
+                        * next sibling if there is one */
+                       if (!parent)
+                               return;
+
+                       dentry = parent;
+
+               } while (list_empty(&dentry->d_subdirs));
+
+               dentry = list_entry(dentry->d_subdirs.next,
+                                   struct dentry, d_u.d_child);
+       }
+}
+
+/*
+ * destroy the dentries attached to a superblock on unmounting
+ * - we don't need to use dentry->d_lock, and only need dcache_lock when
+ *   removing the dentry from the system lists and hashes because:
+ *   - the superblock is detached from all mountings and open files, so the
+ *     dentry trees will not be rearranged by the VFS
+ *   - s_umount is write-locked, so the memory pressure shrinker will ignore
+ *     any dentries belonging to this superblock that it comes across
+ *   - the filesystem itself is no longer permitted to rearrange the dentries
+ *     in this superblock
+ */
+void shrink_dcache_for_umount(struct super_block *sb)
+{
+       struct dentry *dentry;
+
+       if (down_read_trylock(&sb->s_umount))
+               BUG();
+
+       dentry = sb->s_root;
+       sb->s_root = NULL;
+       atomic_dec(&dentry->d_count);
+       shrink_dcache_for_umount_subtree(dentry);
+
+       while (!hlist_empty(&sb->s_anon)) {
+               dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
+               shrink_dcache_for_umount_subtree(dentry);
+       }
+}
+
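+
The loop structure above is easier to see stripped of the dentry bookkeeping: descend to the first leaf, free upwards until an ancestor still has children, then descend again, so the whole tree is torn down without recursion. A compilable userspace sketch of that walk follows; struct node and its fields are invented for illustration and are not part of the patch.

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *parent;
        struct node *first_child;    /* head of this node's child list */
        struct node *next_sibling;   /* link in the parent's child list */
        const char *name;
};

static struct node *new_node(struct node *parent, const char *name)
{
        struct node *n = calloc(1, sizeof(*n));

        n->parent = parent;
        n->name = name;
        if (parent) {
                n->next_sibling = parent->first_child;
                parent->first_child = n;
        }
        return n;
}

static void destroy_tree(struct node *root)   /* root->parent is NULL */
{
        struct node *n = root;

        for (;;) {
                /* descend to the first leaf of the current subtree */
                while (n->first_child)
                        n = n->first_child;

                /* consume this leaf and its now-childless ancestors */
                for (;;) {
                        struct node *parent = n->parent;

                        if (parent)
                                parent->first_child = n->next_sibling;
                        printf("freeing %s\n", n->name);
                        free(n);

                        if (!parent)
                                return;   /* fell off the top of the tree */
                        n = parent;
                        if (n->first_child)
                                break;    /* children remain: descend again */
                }
        }
}

int main(void)
{
        struct node *root = new_node(NULL, "root");
        struct node *b = new_node(root, "root/b");

        new_node(root, "root/c");
        new_node(b, "root/b/d");
        destroy_tree(root);
        return 0;
}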
+/*
  * Search for at least 1 mount point in the dentry's subdirs.
  * We descend to the next level whenever the d_subdirs
  * list is non-empty and continue searching.
@@ -581,7 +768,7 @@ resume:
                 * of the unused list for prune_dcache
                 */
                if (!atomic_read(&dentry->d_count)) {
-                       list_add(&dentry->d_lru, dentry_unused.prev);
+                       list_add_tail(&dentry->d_lru, &dentry_unused);
                        dentry_stat.nr_unused++;
                        found++;
                }
@@ -599,10 +786,6 @@ resume:
                 */
                if (!list_empty(&dentry->d_subdirs)) {
                        this_parent = dentry;
-#ifdef DCACHE_DEBUG
-printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
-dentry->d_parent->d_name.name, dentry->d_name.name, found);
-#endif
                        goto repeat;
                }
        }
@@ -612,10 +795,6 @@ dentry->d_parent->d_name.name, dentry->d_name.name, found);
        if (this_parent != parent) {
                next = this_parent->d_u.d_child.next;
                this_parent = this_parent->d_parent;
-#ifdef DCACHE_DEBUG
-printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
-this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
-#endif
                goto resume;
        }
 out:
@@ -635,46 +814,7 @@ void shrink_dcache_parent(struct dentry * parent)
        int found;
 
        while ((found = select_parent(parent)) != 0)
-               prune_dcache(found);
-}
-
-/**
- * shrink_dcache_anon - further prune the cache
- * @head: head of d_hash list of dentries to prune
- *
- * Prune the dentries that are anonymous
- *
- * parsing d_hash list does not hlist_for_each_entry_rcu() as it
- * done under dcache_lock.
- *
- */
-void shrink_dcache_anon(struct hlist_head *head)
-{
-       struct hlist_node *lp;
-       int found;
-       do {
-               found = 0;
-               spin_lock(&dcache_lock);
-               hlist_for_each(lp, head) {
-                       struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
-                       if (!list_empty(&this->d_lru)) {
-                               dentry_stat.nr_unused--;
-                               list_del_init(&this->d_lru);
-                       }
-
-                       /* 
-                        * move only zero ref count dentries to the end 
-                        * of the unused list for prune_dcache
-                        */
-                       if (!atomic_read(&this->d_count)) {
-                               list_add_tail(&this->d_lru, &dentry_unused);
-                               dentry_stat.nr_unused++;
-                               found++;
-                       }
-               }
-               spin_unlock(&dcache_lock);
-               prune_dcache(found);
-       } while(found);
+               prune_dcache(found, parent->d_sb);
 }
 
 /*
@@ -694,7 +834,7 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
-               prune_dcache(nr);
+               prune_dcache(nr, NULL);
        }
        return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
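
As a worked example of that return value: with 20000 unused dentries, the default vfs_cache_pressure of 100 reports (20000 / 100) * 100 = 20000 freeable objects to the shrinker core; setting the sysctl to 50 halves that to 10000, and 200 doubles it to 40000, biasing global reclaim away from or toward the dcache.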
@@ -743,7 +883,9 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
        dentry->d_op = NULL;
        dentry->d_fsdata = NULL;
        dentry->d_mounted = 0;
+#ifdef CONFIG_PROFILING
        dentry->d_cookie = NULL;
+#endif
        INIT_HLIST_NODE(&dentry->d_hash);
        INIT_LIST_HEAD(&dentry->d_lru);
        INIT_LIST_HEAD(&dentry->d_subdirs);
@@ -792,11 +934,12 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
  
 void d_instantiate(struct dentry *entry, struct inode * inode)
 {
-       if (!list_empty(&entry->d_alias)) BUG();
+       BUG_ON(!list_empty(&entry->d_alias));
        spin_lock(&dcache_lock);
        if (inode)
                list_add(&entry->d_alias, &inode->i_dentry);
        entry->d_inode = inode;
+       fsnotify_d_instantiate(entry, inode);
        spin_unlock(&dcache_lock);
        security_d_instantiate(entry, inode);
 }
@@ -817,17 +960,19 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
  * (or otherwise set) by the caller to indicate that it is now
  * in use by the dcache.
  */
-struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
+static struct dentry *__d_instantiate_unique(struct dentry *entry,
+                                            struct inode *inode)
 {
        struct dentry *alias;
        int len = entry->d_name.len;
        const char *name = entry->d_name.name;
        unsigned int hash = entry->d_name.hash;
 
-       BUG_ON(!list_empty(&entry->d_alias));
-       spin_lock(&dcache_lock);
-       if (!inode)
-               goto do_negative;
+       if (!inode) {
+               entry->d_inode = NULL;
+               return NULL;
+       }
+
        list_for_each_entry(alias, &inode->i_dentry, d_alias) {
                struct qstr *qstr = &alias->d_name;
 
@@ -840,18 +985,35 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
                if (memcmp(qstr->name, name, len))
                        continue;
                dget_locked(alias);
-               spin_unlock(&dcache_lock);
-               BUG_ON(!d_unhashed(alias));
-               iput(inode);
                return alias;
        }
+
        list_add(&entry->d_alias, &inode->i_dentry);
-do_negative:
        entry->d_inode = inode;
-       spin_unlock(&dcache_lock);
-       security_d_instantiate(entry, inode);
+       fsnotify_d_instantiate(entry, inode);
        return NULL;
 }
+
+struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
+{
+       struct dentry *result;
+
+       BUG_ON(!list_empty(&entry->d_alias));
+
+       spin_lock(&dcache_lock);
+       result = __d_instantiate_unique(entry, inode);
+       spin_unlock(&dcache_lock);
+
+       if (!result) {
+               security_d_instantiate(entry, inode);
+               return NULL;
+       }
+
+       BUG_ON(!d_unhashed(result));
+       iput(inode);
+       return result;
+}
+
 EXPORT_SYMBOL(d_instantiate_unique);
 
 /**
@@ -973,11 +1135,12 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
 {
        struct dentry *new = NULL;
 
-       if (inode) {
+       if (inode && S_ISDIR(inode->i_mode)) {
                spin_lock(&dcache_lock);
                new = __d_find_alias(inode, 1);
                if (new) {
                        BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
+                       fsnotify_d_instantiate(new, inode);
                        spin_unlock(&dcache_lock);
                        security_d_instantiate(new, inode);
                        d_rehash(dentry);
@@ -987,6 +1150,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
                        /* d_instantiate takes dcache_lock, so we do it by hand */
                        list_add(&dentry->d_alias, &inode->i_dentry);
                        dentry->d_inode = inode;
+                       fsnotify_d_instantiate(dentry, inode);
                        spin_unlock(&dcache_lock);
                        security_d_instantiate(dentry, inode);
                        d_rehash(dentry);
@@ -1100,6 +1264,32 @@ next:
 }
 
 /**
+ * d_hash_and_lookup - hash the qstr then search for a dentry
+ * @dir: Directory to search in
+ * @name: qstr of name we wish to find
+ *
+ * On hash failure or on lookup failure NULL is returned.
+ */
+struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
+{
+       struct dentry *dentry = NULL;
+
+       /*
+        * Check for a fs-specific hash function. Note that we must
+        * calculate the standard hash first, as the d_op->d_hash()
+        * routine may choose to leave the hash value unchanged.
+        */
+       name->hash = full_name_hash(name->name, name->len);
+       if (dir->d_op && dir->d_op->d_hash) {
+               if (dir->d_op->d_hash(dir, name) < 0)
+                       goto out;
+       }
+       dentry = d_lookup(dir, name);
+out:
+       return dentry;
+}
+
+/**
  * d_validate - verify dentry provided from insecure source
  * @dentry: The dentry alleged to be valid child of @dparent
  * @dparent: The parent dentry (known to be valid)
@@ -1173,6 +1363,9 @@ void d_delete(struct dentry * dentry)
        if (atomic_read(&dentry->d_count) == 1) {
                dentry_iput(dentry);
                fsnotify_nameremove(dentry, isdir);
+
+               /* remove this and other inotify debug checks after 2.6.18 */
+               dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
                return;
        }
 
@@ -1192,6 +1385,11 @@ static void __d_rehash(struct dentry * entry, struct hlist_head *list)
        hlist_add_head_rcu(&entry->d_hash, list);
 }
 
+static void _d_rehash(struct dentry * entry)
+{
+       __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
+}
+
 /**
  * d_rehash    - add an entry back to the hash
  * @entry: dentry to add to the hash
@@ -1201,11 +1399,9 @@ static void __d_rehash(struct dentry * entry, struct hlist_head *list)
  
 void d_rehash(struct dentry * entry)
 {
-       struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);
-
        spin_lock(&dcache_lock);
        spin_lock(&entry->d_lock);
-       __d_rehash(entry, list);
+       _d_rehash(entry);
        spin_unlock(&entry->d_lock);
        spin_unlock(&dcache_lock);
 }
@@ -1296,10 +1492,10 @@ void d_move(struct dentry * dentry, struct dentry * target)
         */
        if (target < dentry) {
                spin_lock(&target->d_lock);
-               spin_lock(&dentry->d_lock);
+               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
        } else {
                spin_lock(&dentry->d_lock);
-               spin_lock(&target->d_lock);
+               spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
        }
 
        /* Move the dentry to the target hash queue, if on different bucket */
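
The address comparison above is the usual way to take two locks of the same class without risking an ABBA deadlock: every task agrees to lock the lower-addressed dentry first, and spin_lock_nested() only tells lockdep that acquiring a second d_lock is intentional rather than a self-deadlock; it does not change the locking itself. A small userspace sketch of the same ordering rule with pthread mutexes (the names here are illustrative, not from the patch):

#include <pthread.h>

struct item {
        pthread_mutex_t lock;
        int value;
};

/* always lock the lower-addressed item first */
static void lock_pair(struct item *a, struct item *b)
{
        if (a < b) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}

static void unlock_pair(struct item *a, struct item *b)
{
        pthread_mutex_unlock(&a->lock);
        pthread_mutex_unlock(&b->lock);
}

/* swap two payloads with both locks held; safe from any thread */
static void swap_values(struct item *a, struct item *b)
{
        int tmp;

        lock_pair(a, b);
        tmp = a->value;
        a->value = b->value;
        b->value = tmp;
        unlock_pair(a, b);
}

int main(void)
{
        struct item x = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct item y = { PTHREAD_MUTEX_INITIALIZER, 2 };

        swap_values(&x, &y);
        return 0;
}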
@@ -1337,11 +1533,126 @@ already_unhashed:
 
        list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
        spin_unlock(&target->d_lock);
+       fsnotify_d_move(dentry);
        spin_unlock(&dentry->d_lock);
        write_sequnlock(&rename_lock);
        spin_unlock(&dcache_lock);
 }
 
+/*
+ * Prepare an anonymous dentry for life in the superblock's dentry tree as a
+ * named dentry in place of the dentry to be replaced.
+ */
+static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
+{
+       struct dentry *dparent, *aparent;
+
+       switch_names(dentry, anon);
+       do_switch(dentry->d_name.len, anon->d_name.len);
+       do_switch(dentry->d_name.hash, anon->d_name.hash);
+
+       dparent = dentry->d_parent;
+       aparent = anon->d_parent;
+
+       dentry->d_parent = (aparent == anon) ? dentry : aparent;
+       list_del(&dentry->d_u.d_child);
+       if (!IS_ROOT(dentry))
+               list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
+       else
+               INIT_LIST_HEAD(&dentry->d_u.d_child);
+
+       anon->d_parent = (dparent == dentry) ? anon : dparent;
+       list_del(&anon->d_u.d_child);
+       if (!IS_ROOT(anon))
+               list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
+       else
+               INIT_LIST_HEAD(&anon->d_u.d_child);
+
+       anon->d_flags &= ~DCACHE_DISCONNECTED;
+}
+
+/**
+ * d_materialise_unique - introduce an inode into the tree
+ * @dentry: candidate dentry
+ * @inode: inode to bind to the dentry, to which aliases may be attached
+ *
+ * Introduces a dentry into the tree, substituting an extant disconnected
+ * root directory alias in its place if there is one
+ */
+struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
+{
+       struct dentry *alias, *actual;
+
+       BUG_ON(!d_unhashed(dentry));
+
+       spin_lock(&dcache_lock);
+
+       if (!inode) {
+               actual = dentry;
+               dentry->d_inode = NULL;
+               goto found_lock;
+       }
+
+       /* See if a disconnected directory already exists as an anonymous root
+        * that we should splice into the tree instead */
+       if (S_ISDIR(inode->i_mode) && (alias = __d_find_alias(inode, 1))) {
+               spin_lock(&alias->d_lock);
+
+               /* Is this a mountpoint that we could splice into our tree? */
+               if (IS_ROOT(alias))
+                       goto connect_mountpoint;
+
+               if (alias->d_name.len == dentry->d_name.len &&
+                   alias->d_parent == dentry->d_parent &&
+                   memcmp(alias->d_name.name,
+                          dentry->d_name.name,
+                          dentry->d_name.len) == 0)
+                       goto replace_with_alias;
+
+               spin_unlock(&alias->d_lock);
+
+               /* Doh! Seem to be aliasing directories for some reason... */
+               dput(alias);
+       }
+
+       /* Add a unique reference */
+       actual = __d_instantiate_unique(dentry, inode);
+       if (!actual)
+               actual = dentry;
+       else if (unlikely(!d_unhashed(actual)))
+               goto shouldnt_be_hashed;
+
+found_lock:
+       spin_lock(&actual->d_lock);
+found:
+       _d_rehash(actual);
+       spin_unlock(&actual->d_lock);
+       spin_unlock(&dcache_lock);
+
+       if (actual == dentry) {
+               security_d_instantiate(dentry, inode);
+               return NULL;
+       }
+
+       iput(inode);
+       return actual;
+
+       /* Convert the anonymous/root alias into an ordinary dentry */
+connect_mountpoint:
+       __d_materialise_dentry(dentry, alias);
+
+       /* Replace the candidate dentry with the alias in the tree */
+replace_with_alias:
+       __d_drop(alias);
+       actual = alias;
+       goto found;
+
+shouldnt_be_hashed:
+       spin_unlock(&dcache_lock);
+       BUG();
+       goto shouldnt_be_hashed;
+}
+
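+
For context, the intended caller is a filesystem ->lookup() (or readdir-plus) path that may discover an inode which already has a disconnected root alias. A hedged, non-compilable sketch of such a caller; examplefs_iget() and the examplefs names are purely hypothetical and not provided by this patch:

static struct dentry *examplefs_lookup(struct inode *dir,
                                       struct dentry *dentry,
                                       struct nameidata *nd)
{
        struct inode *inode = examplefs_iget(dir->i_sb, dentry); /* hypothetical */

        if (IS_ERR(inode))
                return ERR_PTR(PTR_ERR(inode));

        /* NULL if @dentry was bound and hashed as-is, otherwise the spliced
         * alias (already referenced); either value is a valid ->lookup()
         * return. */
        return d_materialise_unique(dentry, inode);
}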
 /**
  * d_path - return the path of a dentry
  * @dentry: dentry to report
@@ -1611,26 +1922,12 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
        struct dentry * dentry;
        ino_t ino = 0;
 
-       /*
-        * Check for a fs-specific hash function. Note that we must
-        * calculate the standard hash first, as the d_op->d_hash()
-        * routine may choose to leave the hash value unchanged.
-        */
-       name->hash = full_name_hash(name->name, name->len);
-       if (dir->d_op && dir->d_op->d_hash)
-       {
-               if (dir->d_op->d_hash(dir, name) != 0)
-                       goto out;
-       }
-
-       dentry = d_lookup(dir, name);
-       if (dentry)
-       {
+       dentry = d_hash_and_lookup(dir, name);
+       if (dentry) {
                if (dentry->d_inode)
                        ino = dentry->d_inode->i_ino;
                dput(dentry);
        }
-out:
        return ino;
 }
 
@@ -1680,7 +1977,8 @@ static void __init dcache_init(unsigned long mempages)
        dentry_cache = kmem_cache_create("dentry_cache",
                                         sizeof(struct dentry),
                                         0,
-                                        SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
+                                        (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+                                        SLAB_MEM_SPREAD),
                                         NULL, NULL);
        
        set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
@@ -1704,16 +2002,13 @@ static void __init dcache_init(unsigned long mempages)
 }
 
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep;
+kmem_cache_t *names_cachep __read_mostly;
 
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep;
+kmem_cache_t *filp_cachep __read_mostly;
 
 EXPORT_SYMBOL(d_genocide);
 
-extern void bdev_cache_init(void);
-extern void chrdev_init(void);
-
 void __init vfs_caches_init_early(void)
 {
        dcache_init_early();
@@ -1734,7 +2029,7 @@ void __init vfs_caches_init(unsigned long mempages)
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, filp_ctor, filp_dtor);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 
        dcache_init(mempages);
        inode_init(mempages);
@@ -1753,6 +2048,7 @@ EXPORT_SYMBOL(d_instantiate);
 EXPORT_SYMBOL(d_invalidate);
 EXPORT_SYMBOL(d_lookup);
 EXPORT_SYMBOL(d_move);
+EXPORT_SYMBOL_GPL(d_materialise_unique);
 EXPORT_SYMBOL(d_path);
 EXPORT_SYMBOL(d_prune_aliases);
 EXPORT_SYMBOL(d_rehash);