diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index d7d5d49..85b4d2f 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -10,6 +10,7 @@
 #include <linux/namei.h>
 #include <linux/writeback.h>
 #include <linux/vmalloc.h>
+#include <linux/pagevec.h>
 
 #include "super.h"
 #include "decode.h"
@@ -377,6 +378,22 @@ void ceph_destroy_inode(struct inode *inode)
 
        ceph_queue_caps_release(inode);
 
+       /*
+        * we may still have a snap_realm reference if there are stray
+        * caps in i_cap_exporting_issued or i_snap_caps.
+        */
+       if (ci->i_snap_realm) {
+               struct ceph_mds_client *mdsc =
+                       &ceph_client(ci->vfs_inode.i_sb)->mdsc;
+               struct ceph_snap_realm *realm = ci->i_snap_realm;
+
+               dout(" dropping residual ref to snap realm %p\n", realm);
+               spin_lock(&realm->inodes_with_caps_lock);
+               list_del_init(&ci->i_snap_realm_item);
+               spin_unlock(&realm->inodes_with_caps_lock);
+               ceph_put_snap_realm(mdsc, realm);
+       }
+
        kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
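A note on the hunk above: unlink-under-lock followed by the ref drop is the standard teardown order for a refcounted object sitting on a shared list, so that a concurrent walker of the inodes_with_caps list can never observe a freed entry. A minimal sketch of the same idiom with generic names (my_realm, my_obj, realm_release and drop_residual_ref are hypothetical, not ceph symbols; ceph's realm refcounting differs in detail):

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_realm {
		struct kref ref;
		spinlock_t lock;		/* protects ->inodes */
		struct list_head inodes;
	};

	struct my_obj {
		struct list_head item;		/* entry on my_realm.inodes */
	};

	static void realm_release(struct kref *ref)
	{
		kfree(container_of(ref, struct my_realm, ref));
	}

	static void drop_residual_ref(struct my_obj *obj, struct my_realm *realm)
	{
		/* unlink under the list lock first ... */
		spin_lock(&realm->lock);
		list_del_init(&obj->item);
		spin_unlock(&realm->lock);
		/* ... then drop the reference, which may free the realm */
		kref_put(&realm->ref, realm_release);
	}
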
@@ -716,6 +733,10 @@ no_change:
                                __ceph_get_fmode(ci, cap_fmode);
                        spin_unlock(&inode->i_lock);
                }
+       } else if (cap_fmode >= 0) {
+               pr_warning("mds issued no caps on %llx.%llx\n",
+                          ceph_vinop(inode));
+               __ceph_get_fmode(ci, cap_fmode);
        }
 
        /* update delegation info? */
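One detail that makes the pr_warning() above read oddly at first: the format string has two %llx specifiers but apparently only one argument. ceph_vinop() is a macro that expands to both halves of the vino, roughly as follows (reconstructed from the ceph headers of this era, so treat the exact definition as an assumption):

	/* a ceph vino is the (ino, snap) pair; the macro expands to both
	 * values, feeding the two %llx specifiers in one go */
	#define ceph_vinop(i) ceph_ino(i), ceph_snap(i)
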
@@ -869,6 +890,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
        struct inode *in = NULL;
        struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
+       struct ceph_client *client = ceph_sb_to_client(sb);
        int i = 0;
        int err = 0;
 
@@ -932,7 +954,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                        return err;
        }
 
-       if (rinfo->head->is_dentry && !req->r_aborted) {
+       /*
+        * ignore null lease/binding on snapdir ENOENT, or else we
+        * will have trouble splicing in the virtual snapdir later
+        */
+       if (rinfo->head->is_dentry && !req->r_aborted &&
+           (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
+                                              client->mount_args->snapdir_name,
+                                              req->r_dentry->d_name.len))) {
                /*
                 * lookup link rename   : null -> possibly existing inode
                 * mknod symlink mkdir  : null -> new inode
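A note on the new guard above: the dentry is only spliced when the reply carries a target inode or the dentry name differs from the configured snapdir name. Because the strncmp() is bounded by d_name.len, a name that is a strict prefix of snapdir_name also compares equal and gets treated as the snapdir; an exact-match form of the test would look roughly like this (dentry_is_snapdir is a hypothetical helper, not a ceph function):

	#include <linux/dcache.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* hypothetical exact-match variant; the in-tree check open-codes
	 * a prefix-bounded strncmp() instead */
	static bool dentry_is_snapdir(struct dentry *dentry, const char *snapdir)
	{
		return dentry->d_name.len == strlen(snapdir) &&
		       memcmp(dentry->d_name.name, snapdir,
			      dentry->d_name.len) == 0;
	}
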
@@ -972,6 +1001,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                             dn, dn->d_name.len, dn->d_name.name);
                        dout("fill_trace doing d_move %p -> %p\n",
                             req->r_old_dentry, dn);
+
+                       /* d_move screws up d_subdirs order */
+                       ceph_i_clear(dir, CEPH_I_COMPLETE);
+
                        d_move(req->r_old_dentry, dn);
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
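For context on the ceph_i_clear() call added above: CEPH_I_COMPLETE marks a directory whose full contents are cached in the dcache, and ceph leans on d_subdirs order when answering readdir from that cache. d_move() reshuffles the child on the parent's d_subdirs list, so the flag must be dropped before the move. The helper itself is roughly the following (reconstructed from ceph's super.h of this era; treat the details as an assumption):

	/* clear bits in the per-inode ceph flags word under i_lock */
	static inline void ceph_i_clear(struct inode *inode, unsigned mask)
	{
		struct ceph_inode_info *ci = ceph_inode(inode);

		spin_lock(&inode->i_lock);
		ci->i_ceph_flags &= ~mask;
		spin_unlock(&inode->i_lock);
	}
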
@@ -1280,6 +1313,49 @@ void ceph_queue_invalidate(struct inode *inode)
 }
 
 /*
+ * invalidate any pages that are not dirty or under writeback.  this
+ * includes pages that are clean and mapped.
+ */
+static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
+{
+       struct pagevec pvec;
+       pgoff_t next = 0;
+       int i;
+
+       pagevec_init(&pvec, 0);
+       while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
+                       pgoff_t index;
+                       int skip_page =
+                               (PageDirty(page) || PageWriteback(page));
+
+                       if (!skip_page)
+                               skip_page = !trylock_page(page);
+
+                       /*
+                        * We really shouldn't be looking at the ->index of an
+                        * unlocked page.  But we're not allowed to lock these
+                        * pages.  So we rely upon nobody altering the ->index
+                        * of this (pinned-by-us) page.
+                        */
+                       index = page->index;
+                       if (index > next)
+                               next = index;
+                       next++;
+
+                       if (skip_page)
+                               continue;
+
+                       generic_error_remove_page(mapping, page);
+                       unlock_page(page);
+               }
+               pagevec_release(&pvec);
+               cond_resched();
+       }
+}
+
+/*
  * Invalidate inode pages in a worker thread.  (This can't be done
  * in the message handler context.)
  */
@@ -1305,7 +1381,7 @@ static void ceph_invalidate_work(struct work_struct *work)
        orig_gen = ci->i_rdcache_gen;
        spin_unlock(&inode->i_lock);
 
-       truncate_inode_pages(&inode->i_data, 0);
+       ceph_invalidate_nondirty_pages(inode->i_mapping);
 
        spin_lock(&inode->i_lock);
        if (orig_gen == ci->i_rdcache_gen) {
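The last two hunks are the heart of the change: truncate_inode_pages() throws away dirty pages outright, losing data ceph still has to write back, while the stock invalidate_mapping_pages() is too timid for this path because it leaves clean mapped pages cached. The new helper splits the difference: it skips dirty and writeback pages but evicts clean pages even when mapped, via generic_error_remove_page(). The i_rdcache_gen comparison bracketing the call catches a racing cap grant: if the generation moved while pages were being dropped, the invalidation is stale. For contrast only (not a proposed change), the too-weak alternative would have been a one-liner:

	/* skips dirty and writeback pages, as required, but also leaves
	 * clean *mapped* pages cached, which is not acceptable once the
	 * MDS has revoked the caps that made those pages trustworthy */
	invalidate_mapping_pages(inode->i_mapping, 0, -1);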