diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index a395348..0399bcb 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -87,8 +87,8 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/writeback.h> /* for inode_lock */
 
 #include <asm/atomic.h>
 
@@ -189,7 +189,8 @@ void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
         * callback to the group function to let it know that this entry
         * is being freed.
         */
-       group->ops->freeing_mark(entry, group);
+       if (group->ops->freeing_mark)
+               group->ops->freeing_mark(entry, group);
 
        /*
         * __fsnotify_update_child_dentry_flags(inode);
@@ -204,6 +205,8 @@ void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
         */
 
 
+       iput(inode);
+
        /*
         * it's possible that this group tried to destroy itself, but this
         * this mark was simultaneously being freed by inode.  If that's the
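With the NULL check added above, a backend no longer has to supply a freeing_mark callback just to satisfy the core. A minimal sketch of what that looks like on the group side, assuming the usual struct fsnotify_ops from <linux/fsnotify_backend.h> (the other callback shown here is illustrative, not taken from this patch):

#include <linux/fsnotify_backend.h>

/* Hypothetical backend with no per-mark cleanup to do. */
static void example_free_group_priv(struct fsnotify_group *group)
{
	/* release whatever private state this group allocated */
}

static struct fsnotify_ops example_fsnotify_ops = {
	.free_group_priv	= example_free_group_priv,
	/*
	 * .freeing_mark deliberately left NULL; with the check added in
	 * fsnotify_destroy_mark_by_entry() the core simply skips the call.
	 */
};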
@@ -306,6 +309,10 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
        struct fsnotify_mark_entry *lentry;
        int ret = 0;
 
+       inode = igrab(inode);
+       if (unlikely(!inode))
+               return -EINVAL;
+
        /*
         * LOCKING ORDER!!!!
         * entry->lock
@@ -316,11 +323,11 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
        spin_lock(&group->mark_lock);
        spin_lock(&inode->i_lock);
 
-       entry->group = group;
-       entry->inode = inode;
-
        lentry = fsnotify_find_mark_entry(group, inode);
        if (!lentry) {
+               entry->group = group;
+               entry->inode = inode;
+
                hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
                list_add(&entry->g_list, &group->mark_entries);
 
@@ -337,6 +344,7 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
 
        if (lentry) {
                ret = -EEXIST;
+               iput(inode);
                fsnotify_put_mark(lentry);
        } else {
                __fsnotify_update_child_dentry_flags(inode);
@@ -344,3 +352,74 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
 
        return ret;
 }
+
+/**
+ * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
+ * @list: list of inodes being unmounted (sb->s_inodes)
+ *
+ * Called with inode_lock held, protecting the unmounting super block's list
+ * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
+ * We temporarily drop inode_lock, however, and CAN block.
+ */
+void fsnotify_unmount_inodes(struct list_head *list)
+{
+       struct inode *inode, *next_i, *need_iput = NULL;
+
+       list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
+               struct inode *need_iput_tmp;
+
+               /*
+                * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
+                * I_WILL_FREE, or I_NEW which is fine because by that point
+                * the inode cannot have any associated watches.
+                */
+               if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
+                       continue;
+
+               /*
+                * If i_count is zero, the inode cannot have any watches and
+                * doing an __iget/iput with MS_ACTIVE clear would actually
+                * evict all inodes with zero i_count from icache which is
+                * unnecessarily violent and may in fact be illegal to do.
+                */
+               if (!atomic_read(&inode->i_count))
+                       continue;
+
+               need_iput_tmp = need_iput;
+               need_iput = NULL;
+
+               /* In case fsnotify_inode_delete() drops a reference. */
+               if (inode != need_iput_tmp)
+                       __iget(inode);
+               else
+                       need_iput_tmp = NULL;
+
+               /* In case the dropping of a reference would nuke next_i. */
+               if ((&next_i->i_sb_list != list) &&
+                   atomic_read(&next_i->i_count) &&
+                   !(next_i->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))) {
+                       __iget(next_i);
+                       need_iput = next_i;
+               }
+
+               /*
+                * We can safely drop inode_lock here because we hold
+                * references on both inode and next_i.  Also no new inodes
+                * will be added since the umount has begun.  Finally,
+                * iprune_mutex keeps shrink_icache_memory() away.
+                */
+               spin_unlock(&inode_lock);
+
+               if (need_iput_tmp)
+                       iput(need_iput_tmp);
+
+               /* for each watch, send FS_UNMOUNT and then remove it */
+               fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+
+               fsnotify_inode_delete(inode);
+
+               iput(inode);
+
+               spin_lock(&inode_lock);
+       }
+}
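For context on the calling convention the comment block above describes, here is a rough sketch of the unmount-side caller, assuming the hook is wired into the superblock teardown path in fs/inode.c; the function name and exact placement are illustrative. inode_lock is the global inode spinlock declared in <linux/writeback.h> (hence the include added at the top of this patch), and iprune_mutex is the fs/inode.c-private mutex that keeps shrink_icache_memory() away.

/* Illustrative caller; real placement in the umount path is an assumption. */
static void example_unmount_path(struct super_block *sb)
{
	/* keep shrink_icache_memory() from racing with us */
	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);

	/*
	 * May drop and re-take inode_lock internally; safe because no new
	 * inodes join sb->s_inodes once the umount has begun.
	 */
	fsnotify_unmount_inodes(&sb->s_inodes);

	/* ... normal eviction of sb->s_inodes would continue here ... */

	spin_unlock(&inode_lock);
	mutex_unlock(&iprune_mutex);
}

The need_iput bookkeeping in the loop exists precisely because the lock is dropped: both the current inode and the next list entry are pinned with __iget() before inode_lock is released, so neither can disappear while the FS_UNMOUNT event is delivered.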