diff --git a/fs/dcache.c b/fs/dcache.c
index a5b76b6..4345577 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -21,7 +21,6 @@
 #include <linux/fsnotify.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/hash.h>
 #include <linux/cache.h>
 #include <linux/module.h>
@@ -39,11 +38,11 @@ int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
-static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(dcache_lock);
 
-static kmem_cache_t *dentry_cache __read_mostly;
+static struct kmem_cache *dentry_cache __read_mostly;
 
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
 
@@ -68,15 +67,19 @@ struct dentry_stat_t dentry_stat = {
        .age_limit = 45,
 };
 
-static void d_callback(struct rcu_head *head)
+static void __d_free(struct dentry *dentry)
 {
-       struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
-
        if (dname_external(dentry))
                kfree(dentry->d_name.name);
        kmem_cache_free(dentry_cache, dentry); 
 }
 
+static void d_callback(struct rcu_head *head)
+{
+       struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
+       __d_free(dentry);
+}
+
 /*
  * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
  * inside dcache_lock.
@@ -85,7 +88,19 @@ static void d_free(struct dentry *dentry)
 {
        if (dentry->d_op && dentry->d_op->d_release)
                dentry->d_op->d_release(dentry);
-       call_rcu(&dentry->d_u.d_rcu, d_callback);
+       /* if dentry was never inserted into hash, immediate free is OK */
+       if (hlist_unhashed(&dentry->d_hash))
+               __d_free(dentry);
+       else
+               call_rcu(&dentry->d_u.d_rcu, d_callback);
+}
+
+static void dentry_lru_remove(struct dentry *dentry)
+{
+       if (!list_empty(&dentry->d_lru)) {
+               list_del_init(&dentry->d_lru);
+               dentry_stat.nr_unused--;
+       }
 }
 
 /*
@@ -113,6 +128,28 @@ static void dentry_iput(struct dentry * dentry)
        }
 }
 
+/**
+ * d_kill - kill dentry and return parent
+ * @dentry: dentry to kill
+ *
+ * Called with dcache_lock and d_lock, releases both.  The dentry must
+ * already be unhashed and removed from the LRU.
+ *
+ * If this is the root of the dentry tree, return NULL.
+ */
+static struct dentry *d_kill(struct dentry *dentry)
+{
+       struct dentry *parent;
+
+       list_del(&dentry->d_u.d_child);
+       dentry_stat.nr_dentry--;        /* For d_free, below */
+       /* drops the locks; at that point nobody can reach this dentry */
+       dentry_iput(dentry);
+       parent = dentry->d_parent;
+       d_free(dentry);
+       return dentry == parent ? NULL : parent;
+}
+
 /* 
  * This is dput
  *
@@ -181,28 +218,11 @@ repeat:
 
 unhash_it:
        __d_drop(dentry);
-
-kill_it: {
-               struct dentry *parent;
-
-               /* If dentry was on d_lru list
-                * delete it from there
-                */
-               if (!list_empty(&dentry->d_lru)) {
-                       list_del(&dentry->d_lru);
-                       dentry_stat.nr_unused--;
-               }
-               list_del(&dentry->d_u.d_child);
-               dentry_stat.nr_dentry--;        /* For d_free, below */
-               /*drops the locks, at that point nobody can reach this dentry */
-               dentry_iput(dentry);
-               parent = dentry->d_parent;
-               d_free(dentry);
-               if (dentry == parent)
-                       return;
-               dentry = parent;
+kill_it:
+       dentry_lru_remove(dentry);
+       dentry = d_kill(dentry);
+       if (dentry)
                goto repeat;
-       }
 }
 
 /**
@@ -267,10 +287,7 @@ int d_invalidate(struct dentry * dentry)
 static inline struct dentry * __dget_locked(struct dentry *dentry)
 {
        atomic_inc(&dentry->d_count);
-       if (!list_empty(&dentry->d_lru)) {
-               dentry_stat.nr_unused--;
-               list_del_init(&dentry->d_lru);
-       }
+       dentry_lru_remove(dentry);
        return dentry;
 }
 
@@ -363,22 +380,34 @@ restart:
  * Throw away a dentry - free the inode, dput the parent.  This requires that
  * the LRU list has already been removed.
  *
+ * Try to prune ancestors as well.  This is necessary to prevent
+ * quadratic behavior of shrink_dcache_parent(), but is also expected
+ * to be beneficial in reducing dentry cache fragmentation.
+ *
  * Called with dcache_lock, drops it and then regains.
  * Called with dentry->d_lock held, drops it.
  */
 static void prune_one_dentry(struct dentry * dentry)
 {
-       struct dentry * parent;
-
        __d_drop(dentry);
-       list_del(&dentry->d_u.d_child);
-       dentry_stat.nr_dentry--;        /* For d_free, below */
-       dentry_iput(dentry);
-       parent = dentry->d_parent;
-       d_free(dentry);
-       if (parent != dentry)
-               dput(parent);
+       dentry = d_kill(dentry);
+
+       /*
+        * Prune ancestors.  Locking is simpler than in dput(),
+        * because dcache_lock needs to be taken anyway.
+        */
        spin_lock(&dcache_lock);
+       while (dentry) {
+               if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
+                       return;
+
+               if (dentry->d_op && dentry->d_op->d_delete)
+                       dentry->d_op->d_delete(dentry);
+               dentry_lru_remove(dentry);
+               __d_drop(dentry);
+               dentry = d_kill(dentry);
+               spin_lock(&dcache_lock);
+       }
 }
 
 /**
@@ -520,18 +549,18 @@ void shrink_dcache_sb(struct super_block * sb)
         * superblock to the most recent end of the unused list.
         */
        spin_lock(&dcache_lock);
-       list_for_each_safe(tmp, next, &dentry_unused) {
+       list_for_each_prev_safe(tmp, next, &dentry_unused) {
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
-               list_move(tmp, &dentry_unused);
+               list_move_tail(tmp, &dentry_unused);
        }
 
        /*
         * Pass two ... free the dentries for this superblock.
         */
 repeat:
-       list_for_each_safe(tmp, next, &dentry_unused) {
+       list_for_each_prev_safe(tmp, next, &dentry_unused) {
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
@@ -557,15 +586,13 @@ repeat:
 static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 {
        struct dentry *parent;
+       unsigned detached = 0;
 
        BUG_ON(!IS_ROOT(dentry));
 
        /* detach this root from the system */
        spin_lock(&dcache_lock);
-       if (!list_empty(&dentry->d_lru)) {
-               dentry_stat.nr_unused--;
-               list_del_init(&dentry->d_lru);
-       }
+       dentry_lru_remove(dentry);
        __d_drop(dentry);
        spin_unlock(&dcache_lock);
 
@@ -579,11 +606,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                        spin_lock(&dcache_lock);
                        list_for_each_entry(loop, &dentry->d_subdirs,
                                            d_u.d_child) {
-                               if (!list_empty(&loop->d_lru)) {
-                                       dentry_stat.nr_unused--;
-                                       list_del_init(&loop->d_lru);
-                               }
-
+                               dentry_lru_remove(loop);
                                __d_drop(loop);
                                cond_resched_lock(&dcache_lock);
                        }
@@ -621,7 +644,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                                atomic_dec(&parent->d_count);
 
                        list_del(&dentry->d_u.d_child);
-                       dentry_stat.nr_dentry--;        /* For d_free, below */
+                       detached++;
 
                        inode = dentry->d_inode;
                        if (inode) {
@@ -639,7 +662,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                         * otherwise we ascend to the parent and move to the
                         * next sibling if there is one */
                        if (!parent)
-                               return;
+                               goto out;
 
                        dentry = parent;
 
@@ -648,6 +671,11 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                dentry = list_entry(dentry->d_subdirs.next,
                                    struct dentry, d_u.d_child);
        }
+out:
+       /* several dentries were freed, need to correct nr_dentry */
+       spin_lock(&dcache_lock);
+       dentry_stat.nr_dentry -= detached;
+       spin_unlock(&dcache_lock);
 }
 
 /*
@@ -760,10 +788,7 @@ resume:
                struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
                next = tmp->next;
 
-               if (!list_empty(&dentry->d_lru)) {
-                       dentry_stat.nr_unused--;
-                       list_del_init(&dentry->d_lru);
-               }
+               dentry_lru_remove(dentry);
                /* 
                 * move only zero ref count dentries to the end 
                 * of the unused list for prune_dcache
@@ -840,6 +865,11 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
        return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker dcache_shrinker = {
+       .shrink = shrink_dcache_memory,
+       .seeks = DEFAULT_SEEKS,
+};
+
 /**
  * d_alloc     -       allocate a dcache entry
  * @parent: parent of entry to allocate
@@ -855,7 +885,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
        struct dentry *dentry;
        char *dname;
 
-       dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 
+       dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
        if (!dentry)
                return NULL;
 
@@ -1364,9 +1394,6 @@ void d_delete(struct dentry * dentry)
        if (atomic_read(&dentry->d_count) == 1) {
                dentry_iput(dentry);
                fsnotify_nameremove(dentry, isdir);
-
-               /* remove this and other inotify debug checks after 2.6.18 */
-               dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
                return;
        }
 
@@ -1435,6 +1462,8 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
                         * dentry:internal, target:external.  Steal target's
                         * storage and make target internal.
                         */
+                       memcpy(target->d_iname, dentry->d_name.name,
+                                       dentry->d_name.len + 1);
                        dentry->d_name.name = target->d_name.name;
                        target->d_name.name = target->d_iname;
                }
@@ -1466,8 +1495,8 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
  * This forceful removal will result in ugly /proc output if
  * somebody holds a file open that got deleted due to a rename.
  * We could be nicer about the deleted file, and let it show
- * up under the name it got deleted rather than the name that
- * deleted it.
+ * up under the name it had before it was deleted rather than
+ * under the original name of the file that was moved on top of it.
  */
  
 /*
@@ -1498,7 +1527,7 @@ static void d_move_locked(struct dentry * dentry, struct dentry * target)
        }
 
        /* Move the dentry to the target hash queue, if on different bucket */
-       if (dentry->d_flags & DCACHE_UNHASHED)
+       if (d_unhashed(dentry))
                goto already_unhashed;
 
        hlist_del_rcu(&dentry->d_hash);
@@ -1733,9 +1762,8 @@ shouldnt_be_hashed:
  *
  * "buflen" should be positive. Caller holds the dcache_lock.
  */
-static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
-                       struct dentry *root, struct vfsmount *rootmnt,
-                       char *buffer, int buflen)
+static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
+                      struct path *root, char *buffer, int buflen)
 {
        char * end = buffer+buflen;
        char * retval;
@@ -1760,7 +1788,7 @@ static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
        for (;;) {
                struct dentry * parent;
 
-               if (dentry == root && vfsmnt == rootmnt)
+               if (dentry == root->dentry && vfsmnt == root->mnt)
                        break;
                if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
                        /* Global root? */
@@ -1801,27 +1829,67 @@ Elong:
        return ERR_PTR(-ENAMETOOLONG);
 }
 
-/* write full pathname into buffer and return start of pathname */
-char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
-                               char *buf, int buflen)
+/**
+ * d_path - return the path of a dentry
+ * @path: path to report
+ * @buf: buffer to return value in
+ * @buflen: buffer length
+ *
+ * Convert a dentry into an ASCII path name. If the entry has been deleted
+ * the string " (deleted)" is appended. Note that this is ambiguous.
+ *
+ * Returns a pointer into the buffer, or an error code if the path was too long.
+ *
+ * "buflen" should be positive.
+ */
+char *d_path(struct path *path, char *buf, int buflen)
 {
        char *res;
-       struct vfsmount *rootmnt;
-       struct dentry *root;
+       struct path root;
+
+       /*
+        * We have various synthetic filesystems that never get mounted.  On
+        * these filesystems dentries are never used for lookup purposes, and
+        * thus don't need to be hashed.  They also don't need a name until a
+        * user wants to identify the object in /proc/pid/fd/.  The little hack
+        * below allows us to generate a name for these objects on demand:
+        */
+       if (path->dentry->d_op && path->dentry->d_op->d_dname)
+               return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 
        read_lock(&current->fs->lock);
-       rootmnt = mntget(current->fs->rootmnt);
-       root = dget(current->fs->root);
+       root = current->fs->root;
+       path_get(&current->fs->root);
        read_unlock(&current->fs->lock);
        spin_lock(&dcache_lock);
-       res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
+       res = __d_path(path->dentry, path->mnt, &root, buf, buflen);
        spin_unlock(&dcache_lock);
-       dput(root);
-       mntput(rootmnt);
+       path_put(&root);
        return res;
 }
 
 /*
+ * Helper function for dentry_operations.d_dname() members
+ */
+char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
+                       const char *fmt, ...)
+{
+       va_list args;
+       char temp[64];
+       int sz;
+
+       va_start(args, fmt);
+       sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
+       va_end(args);
+
+       if (sz > sizeof(temp) || sz > buflen)
+               return ERR_PTR(-ENAMETOOLONG);
+
+       buffer += buflen - sz;
+       return memcpy(buffer, temp, sz);
+}
+
+/*
  * NOTE! The user-level library version returns a
  * character pointer. The kernel system call just
  * returns the length of the buffer filled (which
@@ -1842,28 +1910,27 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
 asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
 {
        int error;
-       struct vfsmount *pwdmnt, *rootmnt;
-       struct dentry *pwd, *root;
+       struct path pwd, root;
        char *page = (char *) __get_free_page(GFP_USER);
 
        if (!page)
                return -ENOMEM;
 
        read_lock(&current->fs->lock);
-       pwdmnt = mntget(current->fs->pwdmnt);
-       pwd = dget(current->fs->pwd);
-       rootmnt = mntget(current->fs->rootmnt);
-       root = dget(current->fs->root);
+       pwd = current->fs->pwd;
+       path_get(&current->fs->pwd);
+       root = current->fs->root;
+       path_get(&current->fs->root);
        read_unlock(&current->fs->lock);
 
        error = -ENOENT;
        /* Has the current directory been unlinked? */
        spin_lock(&dcache_lock);
-       if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
+       if (pwd.dentry->d_parent == pwd.dentry || !d_unhashed(pwd.dentry)) {
                unsigned long len;
                char * cwd;
 
-               cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
+               cwd = __d_path(pwd.dentry, pwd.mnt, &root, page, PAGE_SIZE);
                spin_unlock(&dcache_lock);
 
                error = PTR_ERR(cwd);
@@ -1881,10 +1948,8 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
                spin_unlock(&dcache_lock);
 
 out:
-       dput(pwd);
-       mntput(pwdmnt);
-       dput(root);
-       mntput(rootmnt);
+       path_put(&pwd);
+       path_put(&root);
        free_page((unsigned long) page);
        return error;
 }
@@ -2029,7 +2094,7 @@ static void __init dcache_init_early(void)
                INIT_HLIST_HEAD(&dentry_hashtable[loop]);
 }
 
-static void __init dcache_init(unsigned long mempages)
+static void __init dcache_init(void)
 {
        int loop;
 
@@ -2038,14 +2103,10 @@ static void __init dcache_init(unsigned long mempages)
         * but it is probably not worth it because of the cache nature
         * of the dcache. 
         */
-       dentry_cache = kmem_cache_create("dentry_cache",
-                                        sizeof(struct dentry),
-                                        0,
-                                        (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-                                        SLAB_MEM_SPREAD),
-                                        NULL, NULL);
+       dentry_cache = KMEM_CACHE(dentry,
+               SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
        
-       set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+       register_shrinker(&dcache_shrinker);
 
        /* Hash may have been set up in dcache_init_early */
        if (!hashdist)
@@ -2066,10 +2127,10 @@ static void __init dcache_init(unsigned long mempages)
 }
 
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep __read_mostly;
+struct kmem_cache *names_cachep __read_mostly;
 
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep __read_mostly;
+struct kmem_cache *filp_cachep __read_mostly;
 
 EXPORT_SYMBOL(d_genocide);
 
@@ -2090,15 +2151,15 @@ void __init vfs_caches_init(unsigned long mempages)
        mempages -= reserve;
 
        names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
-       dcache_init(mempages);
-       inode_init(mempages);
+       dcache_init();
+       inode_init();
        files_init(mempages);
-       mnt_init(mempages);
+       mnt_init();
        bdev_cache_init();
        chrdev_init();
 }