ceph: cope with out of order (unsafe after safe) mds reply
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 29a93fe..40dd437 100644
@@ -1,6 +1,7 @@
 #include "ceph_debug.h"
 
 #include <linux/wait.h>
+#include <linux/slab.h>
 #include <linux/sched.h>
 
 #include "mds_client.h"
@@ -9,6 +10,7 @@
 #include "messenger.h"
 #include "decode.h"
 #include "auth.h"
+#include "pagelist.h"
 
 /*
  * A cluster of MDS (metadata server) daemons is responsible for
@@ -254,6 +256,7 @@ static const char *session_state_name(int s)
        case CEPH_MDS_SESSION_OPEN: return "open";
        case CEPH_MDS_SESSION_HUNG: return "hung";
        case CEPH_MDS_SESSION_CLOSING: return "closing";
+       case CEPH_MDS_SESSION_RESTARTING: return "restarting";
        case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
        default: return "???";
        }
@@ -307,6 +310,15 @@ static bool __have_session(struct ceph_mds_client *mdsc, int mds)
        return mdsc->sessions[mds];
 }
 
+static int __verify_registered_session(struct ceph_mds_client *mdsc,
+                                      struct ceph_mds_session *s)
+{
+       if (s->s_mds >= mdsc->max_sessions ||
+           mdsc->sessions[s->s_mds] != s)
+               return -ENOENT;
+       return 0;
+}
+
 /*
  * create+register a new session for given mds.
  * called under mdsc->mutex.
@@ -317,6 +329,8 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        struct ceph_mds_session *s;
 
        s = kzalloc(sizeof(*s), GFP_NOFS);
+       if (!s)
+               return ERR_PTR(-ENOMEM);
        s->s_mdsc = mdsc;
        s->s_mds = mds;
        s->s_state = CEPH_MDS_SESSION_NEW;
@@ -337,10 +351,12 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        s->s_renew_seq = 0;
        INIT_LIST_HEAD(&s->s_caps);
        s->s_nr_caps = 0;
+       s->s_trim_caps = 0;
        atomic_set(&s->s_ref, 1);
        INIT_LIST_HEAD(&s->s_waiting);
        INIT_LIST_HEAD(&s->s_unsafe);
        s->s_num_cap_releases = 0;
+       s->s_cap_iterator = NULL;
        INIT_LIST_HEAD(&s->s_cap_releases);
        INIT_LIST_HEAD(&s->s_cap_releases_done);
        INIT_LIST_HEAD(&s->s_cap_flushing);
@@ -378,10 +394,11 @@ fail_realloc:
 /*
  * called under mdsc->mutex
  */
-static void unregister_session(struct ceph_mds_client *mdsc,
+static void __unregister_session(struct ceph_mds_client *mdsc,
                               struct ceph_mds_session *s)
 {
-       dout("unregister_session mds%d %p\n", s->s_mds, s);
+       dout("__unregister_session mds%d %p\n", s->s_mds, s);
+       BUG_ON(mdsc->sessions[s->s_mds] != s);
        mdsc->sessions[s->s_mds] = NULL;
        ceph_con_close(&s->s_con);
        ceph_put_mds_session(s);
@@ -445,10 +462,42 @@ static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
                                             u64 tid)
 {
        struct ceph_mds_request *req;
-       req = radix_tree_lookup(&mdsc->request_tree, tid);
-       if (req)
-               ceph_mdsc_get_request(req);
-       return req;
+       struct rb_node *n = mdsc->request_tree.rb_node;
+
+       while (n) {
+               req = rb_entry(n, struct ceph_mds_request, r_node);
+               if (tid < req->r_tid)
+                       n = n->rb_left;
+               else if (tid > req->r_tid)
+                       n = n->rb_right;
+               else {
+                       ceph_mdsc_get_request(req);
+                       return req;
+               }
+       }
+       return NULL;
+}
+
+static void __insert_request(struct ceph_mds_client *mdsc,
+                            struct ceph_mds_request *new)
+{
+       struct rb_node **p = &mdsc->request_tree.rb_node;
+       struct rb_node *parent = NULL;
+       struct ceph_mds_request *req = NULL;
+
+       while (*p) {
+               parent = *p;
+               req = rb_entry(parent, struct ceph_mds_request, r_node);
+               if (new->r_tid < req->r_tid)
+                       p = &(*p)->rb_left;
+               else if (new->r_tid > req->r_tid)
+                       p = &(*p)->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&new->r_node, parent, p);
+       rb_insert_color(&new->r_node, &mdsc->request_tree);
 }
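
The radix-tree keyed request map becomes an rb-tree here: tids are 64-bit, and the tree also gives cheap ordered traversal (rb_first()/rb_next()) that __get_oldest_req() and kick_requests() below depend on. As a rough illustration only, a minimal userspace sketch of the same descent, with invented names and the rebalancing step left out:

/* Sketch only: a plain BST standing in for the kernel rb-tree. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	uint64_t tid;
	struct req *left, *right;
};

static struct req *lookup_req(struct req *root, uint64_t tid)
{
	while (root) {
		if (tid < root->tid)
			root = root->left;
		else if (tid > root->tid)
			root = root->right;
		else
			return root;		/* found */
	}
	return NULL;				/* no request with this tid */
}

static void insert_req(struct req **link, struct req *node)
{
	while (*link) {
		if (node->tid < (*link)->tid)
			link = &(*link)->left;
		else if (node->tid > (*link)->tid)
			link = &(*link)->right;
		else
			abort();		/* duplicate tid; the patch BUG()s */
	}
	node->left = node->right = NULL;
	*link = node;	/* the kernel version rebalances via rb_insert_color() */
}

int main(void)
{
	struct req a = { 2, NULL, NULL }, b = { 1, NULL, NULL }, *root = NULL;

	insert_req(&root, &a);
	insert_req(&root, &b);
	printf("tid 1 %s\n", lookup_req(root, 1) ? "found" : "missing");
	return 0;
}
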
 
 /*
@@ -466,7 +515,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
                ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
        dout("__register_request %p tid %lld\n", req, req->r_tid);
        ceph_mdsc_get_request(req);
-       radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req);
+       __insert_request(mdsc, req);
 
        if (dir) {
                struct ceph_inode_info *ci = ceph_inode(dir);
@@ -482,8 +531,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
 {
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
-       radix_tree_delete(&mdsc->request_tree, req->r_tid);
-       ceph_mdsc_put_request(req);
+       rb_erase(&req->r_node, &mdsc->request_tree);
+       RB_CLEAR_NODE(&req->r_node);
 
        if (req->r_unsafe_dir) {
                struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
@@ -492,6 +541,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
                list_del_init(&req->r_unsafe_dir_item);
                spin_unlock(&ci->i_unsafe_lock);
        }
+
+       ceph_mdsc_put_request(req);
 }
 
 /*
@@ -614,10 +665,10 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
        struct ceph_msg *msg;
        struct ceph_mds_session_head *h;
 
-       msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
-       if (IS_ERR(msg)) {
+       msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h));
+       if (!msg) {
                pr_err("create_session_msg ENOMEM creating msg\n");
-               return ERR_PTR(PTR_ERR(msg));
+               return NULL;
        }
        h = msg->front.iov_base;
        h->op = cpu_to_le32(op);
@@ -636,7 +687,6 @@ static int __open_session(struct ceph_mds_client *mdsc,
        struct ceph_msg *msg;
        int mstate;
        int mds = session->s_mds;
-       int err = 0;
 
        /* wait for mds to go active? */
        mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
@@ -647,13 +697,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
 
        /* send connect message */
        msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
-       if (IS_ERR(msg)) {
-               err = PTR_ERR(msg);
-               goto out;
-       }
+       if (!msg)
+               return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
-
-out:
        return 0;
 }
 
@@ -685,43 +731,117 @@ static void cleanup_cap_releases(struct ceph_mds_session *session)
 }
 
 /*
- * Helper to safely iterate over all caps associated with a session.
+ * Helper to safely iterate over all caps associated with a session, with
+ * special care taken to handle a racing __ceph_remove_cap().
  *
- * caller must hold session s_mutex
+ * Caller must hold session s_mutex.
  */
 static int iterate_session_caps(struct ceph_mds_session *session,
                                 int (*cb)(struct inode *, struct ceph_cap *,
                                            void *), void *arg)
 {
-       struct ceph_cap *cap, *ncap;
-       struct inode *inode;
+       struct list_head *p;
+       struct ceph_cap *cap;
+       struct inode *inode, *last_inode = NULL;
+       struct ceph_cap *old_cap = NULL;
        int ret;
 
        dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
        spin_lock(&session->s_cap_lock);
-       list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) {
+       p = session->s_caps.next;
+       while (p != &session->s_caps) {
+               cap = list_entry(p, struct ceph_cap, session_caps);
                inode = igrab(&cap->ci->vfs_inode);
-               if (!inode)
+               if (!inode) {
+                       p = p->next;
                        continue;
+               }
+               session->s_cap_iterator = cap;
                spin_unlock(&session->s_cap_lock);
+
+               if (last_inode) {
+                       iput(last_inode);
+                       last_inode = NULL;
+               }
+               if (old_cap) {
+                       ceph_put_cap(old_cap);
+                       old_cap = NULL;
+               }
+
                ret = cb(inode, cap, arg);
-               iput(inode);
-               if (ret < 0)
-                       return ret;
+               last_inode = inode;
+
                spin_lock(&session->s_cap_lock);
+               p = p->next;
+               if (cap->ci == NULL) {
+                       dout("iterate_session_caps  finishing cap %p removal\n",
+                            cap);
+                       BUG_ON(cap->session != session);
+                       list_del_init(&cap->session_caps);
+                       session->s_nr_caps--;
+                       cap->session = NULL;
+                       old_cap = cap;  /* put_cap it w/o locks held */
+               }
+               if (ret < 0)
+                       goto out;
        }
+       ret = 0;
+out:
+       session->s_cap_iterator = NULL;
        spin_unlock(&session->s_cap_lock);
 
-       return 0;
+       if (last_inode)
+               iput(last_inode);
+       if (old_cap)
+               ceph_put_cap(old_cap);
+
+       return ret;
 }
 
 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
-                                  void *arg)
+                                 void *arg)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       int drop = 0;
+
        dout("removing cap %p, ci is %p, inode is %p\n",
             cap, ci, &ci->vfs_inode);
-       ceph_remove_cap(cap);
+       spin_lock(&inode->i_lock);
+       __ceph_remove_cap(cap);
+       if (!__ceph_is_any_real_caps(ci)) {
+               struct ceph_mds_client *mdsc =
+                       &ceph_sb_to_client(inode->i_sb)->mdsc;
+
+               spin_lock(&mdsc->cap_dirty_lock);
+               if (!list_empty(&ci->i_dirty_item)) {
+                       pr_info(" dropping dirty %s state for %p %lld\n",
+                               ceph_cap_string(ci->i_dirty_caps),
+                               inode, ceph_ino(inode));
+                       ci->i_dirty_caps = 0;
+                       list_del_init(&ci->i_dirty_item);
+                       drop = 1;
+               }
+               if (!list_empty(&ci->i_flushing_item)) {
+                       pr_info(" dropping dirty+flushing %s state for %p %lld\n",
+                               ceph_cap_string(ci->i_flushing_caps),
+                               inode, ceph_ino(inode));
+                       ci->i_flushing_caps = 0;
+                       list_del_init(&ci->i_flushing_item);
+                       mdsc->num_cap_flushing--;
+                       drop = 1;
+               }
+               if (drop && ci->i_wrbuffer_ref) {
+                       pr_info(" dropping dirty data for %p %lld\n",
+                               inode, ceph_ino(inode));
+                       ci->i_wrbuffer_ref = 0;
+                       ci->i_wrbuffer_ref_head = 0;
+                       drop++;
+               }
+               spin_unlock(&mdsc->cap_dirty_lock);
+       }
+       spin_unlock(&inode->i_lock);
+       while (drop--)
+               iput(inode);
        return 0;
 }
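
The new s_cap_iterator field is what lets this walk drop s_cap_lock around the callback: a racing __ceph_remove_cap() that finds the cursor parked on its cap clears cap->ci but leaves the list linkage for the iterator to finish. A simplified single-threaded model of that hand-off (invented names; the kernel serializes each step with s_cap_lock):

#include <stdio.h>

struct cap {
	int id;
	int dead;		/* models cap->ci being cleared */
	struct cap *next;
};

static struct cap *cursor;	/* models session->s_cap_iterator */

/* Remover (models __ceph_remove_cap): defer the unlink if the
 * iterator is currently parked on the victim. */
static void remove_cap(struct cap **list, struct cap *victim)
{
	struct cap **p;

	if (victim == cursor) {
		victim->dead = 1;	/* iterator finishes the removal */
		return;
	}
	for (p = list; *p; p = &(*p)->next) {
		if (*p == victim) {
			*p = victim->next;
			return;
		}
	}
}

/* Iterator (models iterate_session_caps): park the cursor, run the
 * callback, then complete any removal that was deferred to us. */
static void iterate_caps(struct cap **list,
			 void (*cb)(struct cap **, struct cap *))
{
	struct cap **p = list;

	while (*p) {
		struct cap *cap = *p;

		cursor = cap;
		cb(list, cap);		/* may call remove_cap() on cap */
		cursor = NULL;
		if (cap->dead)
			*p = cap->next;	/* finish the deferred unlink */
		else
			p = &cap->next;
	}
}

static void drop_cb(struct cap **list, struct cap *cap)
{
	printf("visiting cap %d\n", cap->id);
	remove_cap(list, cap);		/* removal races with the walk */
}

int main(void)
{
	struct cap c2 = { 2, 0, NULL };
	struct cap c1 = { 1, 0, &c2 };
	struct cap *caps = &c1;

	iterate_caps(&caps, drop_cb);
	printf("list is %s\n", caps ? "non-empty" : "empty");
	return 0;
}
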
 
@@ -733,6 +853,7 @@ static void remove_session_caps(struct ceph_mds_session *session)
        dout("remove_session_caps on %p\n", session);
        iterate_session_caps(session, remove_session_caps_cb, NULL);
        BUG_ON(session->s_nr_caps > 0);
+       BUG_ON(!list_empty(&session->s_cap_flushing));
        cleanup_cap_releases(session);
 }
 
@@ -780,6 +901,7 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
        if (time_after_eq(jiffies, session->s_cap_ttl) &&
            time_after_eq(session->s_cap_ttl, session->s_renew_requested))
                pr_info("mds%d caps stale\n", session->s_mds);
+       session->s_renew_requested = jiffies;
 
        /* do not try to renew caps until a recovering mds has reconnected
         * with its clients. */
@@ -792,11 +914,10 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
 
        dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
                ceph_mds_state_name(state));
-       session->s_renew_requested = jiffies;
        msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
                                 ++session->s_renew_seq);
-       if (IS_ERR(msg))
-               return PTR_ERR(msg);
+       if (!msg)
+               return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
        return 0;
 }
@@ -843,17 +964,15 @@ static int request_close_session(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *session)
 {
        struct ceph_msg *msg;
-       int err = 0;
 
        dout("request_close_session mds%d state %s seq %lld\n",
             session->s_mds, session_state_name(session->s_state),
             session->s_seq);
        msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
-       if (IS_ERR(msg))
-               err = PTR_ERR(msg);
-       else
-               ceph_con_send(&session->s_con, msg);
-       return err;
+       if (!msg)
+               return -ENOMEM;
+       ceph_con_send(&session->s_con, msg);
+       return 0;
 }
 
 /*
@@ -903,7 +1022,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        session->s_trim_caps--;
        if (oissued) {
                /* we aren't the only cap.. just remove us */
-               __ceph_remove_cap(cap, NULL);
+               __ceph_remove_cap(cap);
        } else {
                /* try to drop referring dentries */
                spin_unlock(&inode->i_lock);
@@ -935,6 +1054,7 @@ static int trim_caps(struct ceph_mds_client *mdsc,
                dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
                     session->s_mds, session->s_nr_caps, max_caps,
                        trim_caps - session->s_trim_caps);
+               session->s_trim_caps = 0;
        }
        return 0;
 }
@@ -969,8 +1089,7 @@ static int add_cap_releases(struct ceph_mds_client *mdsc,
 
        while (session->s_num_cap_releases < session->s_nr_caps + extra) {
                spin_unlock(&session->s_cap_lock);
-               msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
-                                  0, 0, NULL);
+               msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE);
                if (!msg)
                        goto out_unlocked;
                dout("add_cap_releases %p msg %p now %d\n", session, msg,
@@ -1062,10 +1181,8 @@ static void send_cap_releases(struct ceph_mds_client *mdsc,
        struct ceph_msg *msg;
 
        dout("send_cap_releases mds%d\n", session->s_mds);
-       while (1) {
-               spin_lock(&session->s_cap_lock);
-               if (list_empty(&session->s_cap_releases_done))
-                       break;
+       spin_lock(&session->s_cap_lock);
+       while (!list_empty(&session->s_cap_releases_done)) {
                msg = list_first_entry(&session->s_cap_releases_done,
                                 struct ceph_msg, list_head);
                list_del_init(&msg->list_head);
@@ -1073,7 +1190,46 @@ static void send_cap_releases(struct ceph_mds_client *mdsc,
                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
                dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
                ceph_con_send(&session->s_con, msg);
+               spin_lock(&session->s_cap_lock);
+       }
+       spin_unlock(&session->s_cap_lock);
+}
+
+static void discard_cap_releases(struct ceph_mds_client *mdsc,
+                                struct ceph_mds_session *session)
+{
+       struct ceph_msg *msg;
+       struct ceph_mds_cap_release *head;
+       unsigned num;
+
+       dout("discard_cap_releases mds%d\n", session->s_mds);
+       spin_lock(&session->s_cap_lock);
+
+       /* zero out the in-progress message */
+       msg = list_first_entry(&session->s_cap_releases,
+                              struct ceph_msg, list_head);
+       head = msg->front.iov_base;
+       num = le32_to_cpu(head->num);
+       dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num);
+       head->num = cpu_to_le32(0);
+       session->s_num_cap_releases += num;
+
+       /* requeue completed messages */
+       while (!list_empty(&session->s_cap_releases_done)) {
+               msg = list_first_entry(&session->s_cap_releases_done,
+                                struct ceph_msg, list_head);
+               list_del_init(&msg->list_head);
+
+               head = msg->front.iov_base;
+               num = le32_to_cpu(head->num);
+               dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
+                    num);
+               session->s_num_cap_releases += num;
+               head->num = cpu_to_le32(0);
+               msg->front.iov_len = sizeof(*head);
+               list_add(&msg->list_head, &session->s_cap_releases);
        }
+
        spin_unlock(&session->s_cap_lock);
 }
 
@@ -1092,6 +1248,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
        if (!req)
                return ERR_PTR(-ENOMEM);
 
+       mutex_init(&req->r_fill_mutex);
        req->r_started = jiffies;
        req->r_resend_mds = -1;
        INIT_LIST_HEAD(&req->r_unsafe_dir_item);
@@ -1108,17 +1265,25 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 }
 
 /*
- * return oldest (lowest) tid in request tree, 0 if none.
+ * return oldest (lowest-tid) request in the request tree; NULL/0 if none.
  *
  * called under mdsc->mutex.
  */
+static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
+{
+       if (RB_EMPTY_ROOT(&mdsc->request_tree))
+               return NULL;
+       return rb_entry(rb_first(&mdsc->request_tree),
+                       struct ceph_mds_request, r_node);
+}
+
 static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
 {
-       struct ceph_mds_request *first;
-       if (radix_tree_gang_lookup(&mdsc->request_tree,
-                                  (void **)&first, 0, 1) <= 0)
-               return 0;
-       return first->r_tid;
+       struct ceph_mds_request *req = __get_oldest_req(mdsc);
+
+       if (req)
+               return req->r_tid;
+       return 0;
 }
 
 /*
@@ -1154,7 +1319,7 @@ retry:
                        len += 1 + temp->d_name.len;
                temp = temp->d_parent;
                if (temp == NULL) {
-                       pr_err("build_path_dentry corrupt dentry %p\n", dentry);
+                       pr_err("build_path corrupt dentry %p\n", dentry);
                        return ERR_PTR(-EINVAL);
                }
        }
@@ -1170,7 +1335,7 @@ retry:
                struct inode *inode = temp->d_inode;
 
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
-                       dout("build_path_dentry path+%d: %p SNAPDIR\n",
+                       dout("build_path path+%d: %p SNAPDIR\n",
                             pos, temp);
                } else if (stop_on_nosnap && inode &&
                           ceph_snap(inode) == CEPH_NOSNAP) {
@@ -1181,20 +1346,18 @@ retry:
                                break;
                        strncpy(path + pos, temp->d_name.name,
                                temp->d_name.len);
-                       dout("build_path_dentry path+%d: %p '%.*s'\n",
-                            pos, temp, temp->d_name.len, path + pos);
                }
                if (pos)
                        path[--pos] = '/';
                temp = temp->d_parent;
                if (temp == NULL) {
-                       pr_err("build_path_dentry corrupt dentry\n");
+                       pr_err("build_path corrupt dentry\n");
                        kfree(path);
                        return ERR_PTR(-EINVAL);
                }
        }
        if (pos != 0) {
-               pr_err("build_path_dentry did not end path lookup where "
+               pr_err("build_path did not end path lookup where "
                       "expected, namelen is %d, pos is %d\n", len, pos);
                /* presumably this is only possible if racing with a
                   rename of one of the parent directories (we can not
@@ -1206,7 +1369,7 @@ retry:
 
        *base = ceph_ino(temp->d_inode);
        *plen = len;
-       dout("build_path_dentry on %p %d built %llx '%.*s'\n",
+       dout("build_path on %p %d built %llx '%.*s'\n",
             dentry, atomic_read(&dentry->d_count), *base, len, path);
        return path;
 }
@@ -1318,7 +1481,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        }
 
        len = sizeof(*head) +
-               pathlen1 + pathlen2 + 2*(sizeof(u32) + sizeof(u64));
+               pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64));
 
        /* calculate (max) length for cap releases */
        len += sizeof(struct ceph_mds_request_release) *
@@ -1329,9 +1492,13 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        if (req->r_old_dentry_drop)
                len += req->r_old_dentry->d_name.len;
 
-       msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
-       if (IS_ERR(msg))
+       msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len);
+       if (!msg) {
+               msg = ERR_PTR(-ENOMEM);
                goto out_free2;
+       }
+
+       msg->hdr.tid = cpu_to_le64(req->r_tid);
 
        head = msg->front.iov_base;
        p = msg->front.iov_base + sizeof(*head);
@@ -1418,14 +1585,13 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
        }
        msg = create_request_message(mdsc, req, mds);
        if (IS_ERR(msg)) {
-               req->r_reply = ERR_PTR(PTR_ERR(msg));
+               req->r_err = PTR_ERR(msg);
                complete_request(mdsc, req);
-               return -PTR_ERR(msg);
+               return PTR_ERR(msg);
        }
        req->r_request = msg;
 
        rhead = msg->front.iov_base;
-       rhead->tid = cpu_to_le64(req->r_tid);
        rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
        if (req->r_got_unsafe)
                flags |= CEPH_MDS_FLAG_REPLAY;
@@ -1454,7 +1620,7 @@ static int __do_request(struct ceph_mds_client *mdsc,
        int mds = -1;
        int err = -EAGAIN;
 
-       if (req->r_reply)
+       if (req->r_err || req->r_got_result)
                goto out;
 
        if (req->r_timeout &&
@@ -1474,8 +1640,13 @@ static int __do_request(struct ceph_mds_client *mdsc,
 
        /* get, open session */
        session = __ceph_lookup_mds_session(mdsc, mds);
-       if (!session)
+       if (!session) {
                session = register_session(mdsc, mds);
+               if (IS_ERR(session)) {
+                       err = PTR_ERR(session);
+                       goto finish;
+               }
+       }
        dout("do_request mds%d session %p state %s\n", mds, session,
             session_state_name(session->s_state));
        if (session->s_state != CEPH_MDS_SESSION_OPEN &&
@@ -1506,7 +1677,7 @@ out:
        return err;
 
 finish:
-       req->r_reply = ERR_PTR(err);
+       req->r_err = err;
        complete_request(mdsc, req);
        goto out;
 }
@@ -1527,31 +1698,23 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
 
 /*
  * Wake up threads with requests pending for @mds, so that they can
- * resubmit their requests to a possibly different mds.  If @all is set,
- * wake up if their requests has been forwarded to @mds, too.
+ * resubmit their requests to a possibly different mds.
  */
-static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
+static void kick_requests(struct ceph_mds_client *mdsc, int mds)
 {
-       struct ceph_mds_request *reqs[10];
-       u64 nexttid = 0;
-       int i, got;
+       struct ceph_mds_request *req;
+       struct rb_node *p;
 
        dout("kick_requests mds%d\n", mds);
-       while (nexttid <= mdsc->last_tid) {
-               got = radix_tree_gang_lookup(&mdsc->request_tree,
-                                            (void **)&reqs, nexttid, 10);
-               if (got == 0)
-                       break;
-               nexttid = reqs[got-1]->r_tid + 1;
-               for (i = 0; i < got; i++) {
-                       if (reqs[i]->r_got_unsafe)
-                               continue;
-                       if (reqs[i]->r_session &&
-                           reqs[i]->r_session->s_mds == mds) {
-                               dout(" kicking tid %llu\n", reqs[i]->r_tid);
-                               put_request_session(reqs[i]);
-                               __do_request(mdsc, reqs[i]);
-                       }
+       for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
+               req = rb_entry(p, struct ceph_mds_request, r_node);
+               if (req->r_got_unsafe)
+                       continue;
+               if (req->r_session &&
+                   req->r_session->s_mds == mds) {
+                       dout(" kicking tid %llu\n", req->r_tid);
+                       put_request_session(req);
+                       __do_request(mdsc, req);
                }
        }
 }
@@ -1593,38 +1756,66 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
        __register_request(mdsc, req, dir);
        __do_request(mdsc, req);
 
+       if (req->r_err) {
+               err = req->r_err;
+               __unregister_request(mdsc, req);
+               dout("do_request early error %d\n", err);
+               goto out;
+       }
+
        /* wait */
-       if (!req->r_reply) {
-               mutex_unlock(&mdsc->mutex);
-               if (req->r_timeout) {
-                       err = wait_for_completion_timeout(&req->r_completion,
-                                                         req->r_timeout);
-                       if (err > 0)
-                               err = 0;
-                       else if (err == 0)
-                               req->r_reply = ERR_PTR(-EIO);
-               } else {
-                       wait_for_completion(&req->r_completion);
-               }
-               mutex_lock(&mdsc->mutex);
+       mutex_unlock(&mdsc->mutex);
+       dout("do_request waiting\n");
+       if (req->r_timeout) {
+               err = (long)wait_for_completion_interruptible_timeout(
+                       &req->r_completion, req->r_timeout);
+               if (err == 0)
+                       err = -EIO;
+       } else {
+               err = wait_for_completion_interruptible(&req->r_completion);
        }
+       dout("do_request waited, got %d\n", err);
+       mutex_lock(&mdsc->mutex);
 
-       if (IS_ERR(req->r_reply)) {
-               err = PTR_ERR(req->r_reply);
-               req->r_reply = NULL;
+       /* only abort if we didn't race with a real reply */
+       if (req->r_got_result) {
+               err = le32_to_cpu(req->r_reply_info.head->result);
+       } else if (err < 0) {
+               dout("aborted request %lld with %d\n", req->r_tid, err);
 
-               /* clean up */
-               __unregister_request(mdsc, req);
-               if (!list_empty(&req->r_unsafe_item))
-                       list_del_init(&req->r_unsafe_item);
-               complete(&req->r_safe_completion);
-       } else if (req->r_err) {
-               err = req->r_err;
+               /*
+                * ensure we aren't running concurrently with
+                * ceph_fill_trace or ceph_readdir_prepopulate, which
+                * rely on locks (dir mutex) held by our caller.
+                */
+               mutex_lock(&req->r_fill_mutex);
+               req->r_err = err;
+               req->r_aborted = true;
+               mutex_unlock(&req->r_fill_mutex);
+
+               if (req->r_locked_dir &&
+                   (req->r_op & CEPH_MDS_OP_WRITE)) {
+                       struct ceph_inode_info *ci =
+                               ceph_inode(req->r_locked_dir);
+
+                       dout("aborted, clearing I_COMPLETE on %p, leases\n",
+                            req->r_locked_dir);
+                       spin_lock(&req->r_locked_dir->i_lock);
+                       ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
+                       ci->i_release_count++;
+                       spin_unlock(&req->r_locked_dir->i_lock);
+
+                       if (req->r_dentry)
+                               ceph_invalidate_dentry_lease(req->r_dentry);
+                       if (req->r_old_dentry)
+                               ceph_invalidate_dentry_lease(req->r_old_dentry);
+               }
        } else {
-               err = le32_to_cpu(req->r_reply_info.head->result);
+               err = req->r_err;
        }
-       mutex_unlock(&mdsc->mutex);
 
+out:
+       mutex_unlock(&mdsc->mutex);
        dout("do_request %p done, result %d\n", req, err);
        return err;
 }
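
This abort path is what the new r_fill_mutex is for: an interrupted caller returns while its reply may still be in flight, and the reply side must not touch state (the dir mutex, dentry leases) that the departed caller owned. A stripped-down pthreads model of the handshake, with invented names; in the patch the flag is set here and honored under the same mutex on the handle_reply()/ceph_fill_trace() side:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
	pthread_mutex_t fill_mutex;	/* models r_fill_mutex */
	bool aborted;			/* models r_aborted */
};

/* Waiter side: mark the abort under fill_mutex so it cannot
 * interleave with a trace fill already in progress. */
static void abort_request(struct request *req)
{
	pthread_mutex_lock(&req->fill_mutex);
	req->aborted = true;
	pthread_mutex_unlock(&req->fill_mutex);
}

/* Reply side: fill under the same mutex and respect the flag, so a
 * late reply never touches the departed caller's locked state. */
static void handle_late_reply(struct request *req)
{
	pthread_mutex_lock(&req->fill_mutex);
	if (req->aborted)
		printf("reply after abort: skipping trace fill\n");
	else
		printf("filling trace into cache\n");
	pthread_mutex_unlock(&req->fill_mutex);
}

int main(void)
{
	struct request req = { PTHREAD_MUTEX_INITIALIZER, false };

	abort_request(&req);	/* e.g. wait interrupted by a signal */
	handle_late_reply(&req);
	return 0;
}
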
@@ -1644,10 +1835,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
        u64 tid;
        int err, result;
-       int mds;
+       int mds = session->s_mds;
 
-       if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
-               return;
        if (msg->front.iov_len < sizeof(*head)) {
                pr_err("mdsc_handle_reply got corrupt (short) reply\n");
                ceph_msg_dump(msg);
@@ -1655,7 +1844,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        }
 
        /* get request, session */
-       tid = le64_to_cpu(head->tid);
+       tid = le64_to_cpu(msg->hdr.tid);
        mutex_lock(&mdsc->mutex);
        req = __lookup_request(mdsc, tid);
        if (!req) {
@@ -1664,10 +1853,9 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                return;
        }
        dout("handle_reply %p\n", req);
-       mds = le64_to_cpu(msg->hdr.src.name.num);
 
        /* correct session? */
-       if (!req->r_session && req->r_session != session) {
+       if (req->r_session != session) {
                pr_err("mdsc_handle_reply got %llu on session mds%d"
                       " not mds%d\n", tid, session->s_mds,
                       req->r_session ? req->r_session->s_mds : -1);
@@ -1683,6 +1871,12 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                mutex_unlock(&mdsc->mutex);
                goto out;
        }
+       if (req->r_got_safe && !head->safe) {
+               pr_warning("got unsafe after safe on %llu from mds%d\n",
+                          tid, mds);
+               mutex_unlock(&mdsc->mutex);
+               goto out;
+       }
 
        result = le32_to_cpu(head->result);
 
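
The six added lines above are the fix named in the subject: a recovering MDS can emit an unsafe reply after the safe one has already been applied, and replaying it would rewind state. Combined with the duplicate-reply check just above it (elided from the hunk context), the acceptance rule can be modeled by this standalone sketch (invented names):

#include <stdbool.h>
#include <stdio.h>

struct reply_flags {
	bool got_unsafe;	/* models r_got_unsafe */
	bool got_safe;		/* models r_got_safe */
};

/* Returns true if the reply should be processed. */
static bool accept_reply(struct reply_flags *f, bool safe)
{
	if ((f->got_unsafe && !safe) || (f->got_safe && safe))
		return false;		/* duplicate reply */
	if (f->got_safe && !safe)
		return false;		/* unsafe after safe: out of order */
	if (safe)
		f->got_safe = true;
	else
		f->got_unsafe = true;
	return true;
}

int main(void)
{
	struct reply_flags f = { false, false };

	printf("safe first:  %d\n", accept_reply(&f, true));	/* 1 */
	printf("late unsafe: %d\n", accept_reply(&f, false));	/* 0: dropped */
	printf("dup safe:    %d\n", accept_reply(&f, true));	/* 0: dropped */
	return 0;
}
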
@@ -1719,16 +1913,12 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                        list_del_init(&req->r_unsafe_item);
 
                        /* last unsafe request during umount? */
-                       if (mdsc->stopping && !__get_oldest_tid(mdsc))
+                       if (mdsc->stopping && !__get_oldest_req(mdsc))
                                complete(&mdsc->safe_umount_waiters);
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                }
-       }
-
-       BUG_ON(req->r_reply);
-
-       if (!head->safe) {
+       } else {
                req->r_got_unsafe = true;
                list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
        }
@@ -1757,21 +1947,30 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        }
 
        /* insert trace into our cache */
+       mutex_lock(&req->r_fill_mutex);
        err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
        if (err == 0) {
                if (result == 0 && rinfo->dir_nr)
                        ceph_readdir_prepopulate(req, req->r_session);
                ceph_unreserve_caps(&req->r_caps_reservation);
        }
+       mutex_unlock(&req->r_fill_mutex);
 
        up_read(&mdsc->snap_rwsem);
 out_err:
-       if (err) {
-               req->r_err = err;
+       mutex_lock(&mdsc->mutex);
+       if (!req->r_aborted) {
+               if (err) {
+                       req->r_err = err;
+               } else {
+                       req->r_reply = msg;
+                       ceph_msg_get(msg);
+                       req->r_got_result = true;
+               }
        } else {
-               req->r_reply = msg;
-               ceph_msg_get(msg);
+               dout("reply arrived after request %lld was aborted\n", tid);
        }
+       mutex_unlock(&mdsc->mutex);
 
        add_cap_releases(mdsc, req->r_session, -1);
        mutex_unlock(&session->s_mutex);
@@ -1788,38 +1987,29 @@ out:
 /*
  * handle mds notification that our request has been forwarded.
  */
-static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+static void handle_forward(struct ceph_mds_client *mdsc,
+                          struct ceph_mds_session *session,
+                          struct ceph_msg *msg)
 {
        struct ceph_mds_request *req;
-       u64 tid;
+       u64 tid = le64_to_cpu(msg->hdr.tid);
        u32 next_mds;
        u32 fwd_seq;
-       u8 must_resend;
        int err = -EINVAL;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;
-       int from_mds, state;
 
-       if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
-               goto bad;
-       from_mds = le64_to_cpu(msg->hdr.src.name.num);
-
-       ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad);
-       tid = ceph_decode_64(&p);
+       ceph_decode_need(&p, end, 2*sizeof(u32), bad);
        next_mds = ceph_decode_32(&p);
        fwd_seq = ceph_decode_32(&p);
-       must_resend = ceph_decode_8(&p);
-
-       WARN_ON(must_resend);  /* shouldn't happen. */
 
        mutex_lock(&mdsc->mutex);
        req = __lookup_request(mdsc, tid);
        if (!req) {
-               dout("forward %llu dne\n", tid);
+               dout("forward %llu to mds%d - req dne\n", tid, next_mds);
                goto out;  /* dup reply? */
        }
 
-       state = mdsc->sessions[next_mds]->s_state;
        if (fwd_seq <= req->r_num_fwd) {
                dout("forward %llu to mds%d - old seq %d <= %d\n",
                     tid, next_mds, req->r_num_fwd, fwd_seq);
@@ -1849,14 +2039,10 @@ static void handle_session(struct ceph_mds_session *session,
        struct ceph_mds_client *mdsc = session->s_mdsc;
        u32 op;
        u64 seq;
-       int mds;
+       int mds = session->s_mds;
        struct ceph_mds_session_head *h = msg->front.iov_base;
        int wake = 0;
 
-       if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
-               return;
-       mds = le64_to_cpu(msg->hdr.src.name.num);
-
        /* decode */
        if (msg->front.iov_len != sizeof(*h))
                goto bad;
@@ -1864,6 +2050,8 @@ static void handle_session(struct ceph_mds_session *session,
        seq = le64_to_cpu(h->seq);
 
        mutex_lock(&mdsc->mutex);
+       if (op == CEPH_SESSION_CLOSE)
+               __unregister_session(mdsc, session);
        /* FIXME: this ttl calculation is generous */
        session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
        mutex_unlock(&mdsc->mutex);
@@ -1881,6 +2069,8 @@ static void handle_session(struct ceph_mds_session *session,
 
        switch (op) {
        case CEPH_SESSION_OPEN:
+               if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
+                       pr_info("mds%d reconnect success\n", session->s_mds);
                session->s_state = CEPH_MDS_SESSION_OPEN;
                renewed_caps(mdsc, session, 0);
                wake = 1;
@@ -1894,11 +2084,12 @@ static void handle_session(struct ceph_mds_session *session,
                break;
 
        case CEPH_SESSION_CLOSE:
-               unregister_session(mdsc, session);
+               if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
+                       pr_info("mds%d reconnect denied\n", session->s_mds);
                remove_session_caps(session);
                wake = 1; /* for good measure */
                complete(&mdsc->session_close_waiters);
-               kick_requests(mdsc, mds, 0);      /* cur only */
+               kick_requests(mdsc, mds);
                break;
 
        case CEPH_SESSION_STALE:
@@ -1961,20 +2152,12 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
 /*
  * Encode information about a cap for a reconnect with the MDS.
  */
-struct encode_caps_data {
-       void **pp;
-       void *end;
-       int *num_caps;
-};
-
 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                          void *arg)
 {
-       struct ceph_mds_cap_reconnect *rec;
+       struct ceph_mds_cap_reconnect rec;
        struct ceph_inode_info *ci;
-       struct encode_caps_data *data = (struct encode_caps_data *)arg;
-       void *p = *(data->pp);
-       void *end = data->end;
+       struct ceph_pagelist *pagelist = arg;
        char *path;
        int pathlen, err;
        u64 pathbase;
@@ -1985,8 +2168,9 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
             inode, ceph_vinop(inode), cap, cap->cap_id,
             ceph_cap_string(cap->issued));
-       ceph_decode_need(&p, end, sizeof(u64), needmore);
-       ceph_encode_64(&p, ceph_ino(inode));
+       err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
+       if (err)
+               return err;
 
        dentry = d_find_alias(inode);
        if (dentry) {
@@ -1999,33 +2183,29 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                path = NULL;
                pathlen = 0;
        }
-       ceph_decode_need(&p, end, pathlen+4, needmore);
-       ceph_encode_string(&p, end, path, pathlen);
+       err = ceph_pagelist_encode_string(pagelist, path, pathlen);
+       if (err)
+               goto out;
 
-       ceph_decode_need(&p, end, sizeof(*rec), needmore);
-       rec = p;
-       p += sizeof(*rec);
-       BUG_ON(p > end);
        spin_lock(&inode->i_lock);
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
-       rec->cap_id = cpu_to_le64(cap->cap_id);
-       rec->pathbase = cpu_to_le64(pathbase);
-       rec->wanted = cpu_to_le32(__ceph_caps_wanted(ci));
-       rec->issued = cpu_to_le32(cap->issued);
-       rec->size = cpu_to_le64(inode->i_size);
-       ceph_encode_timespec(&rec->mtime, &inode->i_mtime);
-       ceph_encode_timespec(&rec->atime, &inode->i_atime);
-       rec->snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+       rec.cap_id = cpu_to_le64(cap->cap_id);
+       rec.pathbase = cpu_to_le64(pathbase);
+       rec.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
+       rec.issued = cpu_to_le32(cap->issued);
+       rec.size = cpu_to_le64(inode->i_size);
+       ceph_encode_timespec(&rec.mtime, &inode->i_mtime);
+       ceph_encode_timespec(&rec.atime, &inode->i_atime);
+       rec.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
        spin_unlock(&inode->i_lock);
 
+       err = ceph_pagelist_append(pagelist, &rec, sizeof(rec));
+
+out:
        kfree(path);
        dput(dentry);
-       (*data->num_caps)++;
-       *(data->pp) = p;
-       return 0;
-needmore:
-       return -ENOSPC;
+       return err;
 }
 
 
@@ -2041,150 +2221,96 @@ needmore:
  *
  * called with mdsc->mutex held.
  */
-static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
+static void send_mds_reconnect(struct ceph_mds_client *mdsc,
+                              struct ceph_mds_session *session)
 {
-       struct ceph_mds_session *session;
        struct ceph_msg *reply;
-       int newlen, len = 4 + 1;
-       void *p, *end;
-       int err;
-       int num_caps, num_realms = 0;
-       int got;
-       u64 next_snap_ino = 0;
-       __le32 *pnum_caps, *pnum_realms;
-       struct encode_caps_data iter_args;
-
-       pr_info("reconnect to recovering mds%d\n", mds);
+       struct rb_node *p;
+       int mds = session->s_mds;
+       int err = -ENOMEM;
+       struct ceph_pagelist *pagelist;
 
-       /* find session */
-       session = __ceph_lookup_mds_session(mdsc, mds);
-       mutex_unlock(&mdsc->mutex);    /* drop lock for duration */
+       pr_info("mds%d reconnect start\n", mds);
 
-       if (session) {
-               mutex_lock(&session->s_mutex);
+       pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+       if (!pagelist)
+               goto fail_nopagelist;
+       ceph_pagelist_init(pagelist);
 
-               session->s_state = CEPH_MDS_SESSION_RECONNECTING;
-               session->s_seq = 0;
+       reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0);
+       if (!reply)
+               goto fail_nomsg;
 
-               ceph_con_open(&session->s_con,
-                             ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
+       mutex_lock(&session->s_mutex);
+       session->s_state = CEPH_MDS_SESSION_RECONNECTING;
+       session->s_seq = 0;
 
-               /* replay unsafe requests */
-               replay_unsafe_requests(mdsc, session);
+       ceph_con_open(&session->s_con,
+                     ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
 
-               /* estimate needed space */
-               len += session->s_nr_caps *
-                       (100+sizeof(struct ceph_mds_cap_reconnect));
-               pr_info("estimating i need %d bytes for %d caps\n",
-                    len, session->s_nr_caps);
-       } else {
-               dout("no session for mds%d, will send short reconnect\n",
-                    mds);
-       }
+       /* replay unsafe requests */
+       replay_unsafe_requests(mdsc, session);
 
        down_read(&mdsc->snap_rwsem);
 
-retry:
-       /* build reply */
-       reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, len, 0, 0, NULL);
-       if (IS_ERR(reply)) {
-               err = PTR_ERR(reply);
-               pr_err("send_mds_reconnect ENOMEM on %d for mds%d\n",
-                      len, mds);
-               goto out;
-       }
-       p = reply->front.iov_base;
-       end = p + len;
-
-       if (!session) {
-               ceph_encode_8(&p, 1); /* session was closed */
-               ceph_encode_32(&p, 0);
-               goto send;
-       }
        dout("session %p state %s\n", session,
             session_state_name(session->s_state));
 
+       /* drop old cap expires; we're about to reestablish that state */
+       discard_cap_releases(mdsc, session);
+
        /* traverse this session's caps */
-       ceph_encode_8(&p, 0);
-       pnum_caps = p;
-       ceph_encode_32(&p, session->s_nr_caps);
-       num_caps = 0;
-
-       iter_args.pp = &p;
-       iter_args.end = end;
-       iter_args.num_caps = &num_caps;
-       err = iterate_session_caps(session, encode_caps_cb, &iter_args);
-       if (err == -ENOSPC)
-               goto needmore;
+       err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
+       if (err)
+               goto fail;
+       err = iterate_session_caps(session, encode_caps_cb, pagelist);
        if (err < 0)
-               goto out;
-       *pnum_caps = cpu_to_le32(num_caps);
+               goto fail;
 
        /*
         * snaprealms.  we provide mds with the ino, seq (version), and
         * parent for all of our realms.  If the mds has any newer info,
         * it will tell us.
         */
-       next_snap_ino = 0;
-       /* save some space for the snaprealm count */
-       pnum_realms = p;
-       ceph_decode_need(&p, end, sizeof(*pnum_realms), needmore);
-       p += sizeof(*pnum_realms);
-       num_realms = 0;
-       while (1) {
-               struct ceph_snap_realm *realm;
-               struct ceph_mds_snaprealm_reconnect *sr_rec;
-               got = radix_tree_gang_lookup(&mdsc->snap_realms,
-                                            (void **)&realm, next_snap_ino, 1);
-               if (!got)
-                       break;
+       for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
+               struct ceph_snap_realm *realm =
+                       rb_entry(p, struct ceph_snap_realm, node);
+               struct ceph_mds_snaprealm_reconnect sr_rec;
 
                dout(" adding snap realm %llx seq %lld parent %llx\n",
                     realm->ino, realm->seq, realm->parent_ino);
-               ceph_decode_need(&p, end, sizeof(*sr_rec), needmore);
-               sr_rec = p;
-               sr_rec->ino = cpu_to_le64(realm->ino);
-               sr_rec->seq = cpu_to_le64(realm->seq);
-               sr_rec->parent = cpu_to_le64(realm->parent_ino);
-               p += sizeof(*sr_rec);
-               num_realms++;
-               next_snap_ino = realm->ino + 1;
-       }
-       *pnum_realms = cpu_to_le32(num_realms);
-
-send:
-       reply->front.iov_len = p - reply->front.iov_base;
-       reply->hdr.front_len = cpu_to_le32(reply->front.iov_len);
-       dout("final len was %u (guessed %d)\n",
-            (unsigned)reply->front.iov_len, len);
+               sr_rec.ino = cpu_to_le64(realm->ino);
+               sr_rec.seq = cpu_to_le64(realm->seq);
+               sr_rec.parent = cpu_to_le64(realm->parent_ino);
+               err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
+               if (err)
+                       goto fail;
+       }
+
+       reply->pagelist = pagelist;
+       reply->hdr.data_len = cpu_to_le32(pagelist->length);
+       reply->nr_pages = calc_pages_for(0, pagelist->length);
        ceph_con_send(&session->s_con, reply);
 
-       if (session) {
-               session->s_state = CEPH_MDS_SESSION_OPEN;
-               __wake_requests(mdsc, &session->s_waiting);
-       }
+       mutex_unlock(&session->s_mutex);
 
-out:
-       up_read(&mdsc->snap_rwsem);
-       if (session) {
-               mutex_unlock(&session->s_mutex);
-               ceph_put_mds_session(session);
-       }
        mutex_lock(&mdsc->mutex);
+       __wake_requests(mdsc, &session->s_waiting);
+       mutex_unlock(&mdsc->mutex);
+
+       up_read(&mdsc->snap_rwsem);
        return;
 
-needmore:
-       /*
-        * we need a larger buffer.  this doesn't very accurately
-        * factor in snap realms, but it's safe.
-        */
-       num_caps += num_realms;
-       newlen = len * ((100 * (session->s_nr_caps+3)) / (num_caps + 1)) / 100;
-       pr_info("i guessed %d, and did %d of %d caps, retrying with %d\n",
-            len, num_caps, session->s_nr_caps, newlen);
-       len = newlen;
+fail:
        ceph_msg_put(reply);
-       goto retry;
+       up_read(&mdsc->snap_rwsem);
+       mutex_unlock(&session->s_mutex);
+fail_nomsg:
+       ceph_pagelist_release(pagelist);
+       kfree(pagelist);
+fail_nopagelist:
+       pr_err("error %d preparing reconnect for mds%d\n", err, mds);
+       return;
 }
 
 
@@ -2224,7 +2350,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                                /* the session never opened, just close it
                                 * out now */
                                __wake_requests(mdsc, &s->s_waiting);
-                               unregister_session(mdsc, s);
+                               __unregister_session(mdsc, s);
                        } else {
                                /* just close it */
                                mutex_unlock(&mdsc->mutex);
@@ -2236,7 +2362,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                        }
 
                        /* kick any requests waiting on the recovering mds */
-                       kick_requests(mdsc, i, 1);
+                       kick_requests(mdsc, i);
                } else if (oldstate == newstate) {
                        continue;  /* nothing new with this mds */
                }
@@ -2245,22 +2371,21 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                 * send reconnect?
                 */
                if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
-                   newstate >= CEPH_MDS_STATE_RECONNECT)
-                       send_mds_reconnect(mdsc, i);
+                   newstate >= CEPH_MDS_STATE_RECONNECT) {
+                       mutex_unlock(&mdsc->mutex);
+                       send_mds_reconnect(mdsc, s);
+                       mutex_lock(&mdsc->mutex);
+               }
 
                /*
-                * kick requests on any mds that has gone active.
-                *
-                * kick requests on cur or forwarder: we may have sent
-                * the request to mds1, mds1 told us it forwarded it
-                * to mds2, but then we learn mds1 failed and can't be
-                * sure it successfully forwarded our request before
-                * it died.
+                * kick requests on any mds that has gone active.
                 */
                if (oldstate < CEPH_MDS_STATE_ACTIVE &&
                    newstate >= CEPH_MDS_STATE_ACTIVE) {
-                       pr_info("mds%d reconnect completed\n", s->s_mds);
-                       kick_requests(mdsc, i, 1);
+                       if (oldstate != CEPH_MDS_STATE_CREATING &&
+                           oldstate != CEPH_MDS_STATE_STARTING)
+                               pr_info("mds%d recovery completed\n", s->s_mds);
+                       kick_requests(mdsc, i);
                        ceph_kick_flushing_caps(mdsc, s);
                        wake_up_session_caps(s, 1);
                }
@@ -2284,24 +2409,22 @@ void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
        di->lease_session = NULL;
 }
 
-static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+static void handle_lease(struct ceph_mds_client *mdsc,
+                        struct ceph_mds_session *session,
+                        struct ceph_msg *msg)
 {
        struct super_block *sb = mdsc->client->sb;
        struct inode *inode;
-       struct ceph_mds_session *session;
        struct ceph_inode_info *ci;
        struct dentry *parent, *dentry;
        struct ceph_dentry_info *di;
-       int mds;
+       int mds = session->s_mds;
        struct ceph_mds_lease *h = msg->front.iov_base;
        struct ceph_vino vino;
        int mask;
        struct qstr dname;
        int release = 0;
 
-       if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
-               return;
-       mds = le64_to_cpu(msg->hdr.src.name.num);
        dout("handle_lease from mds%d\n", mds);
 
        /* decode */
@@ -2315,15 +2438,6 @@ static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
        if (dname.len != get_unaligned_le32(h+1))
                goto bad;
 
-       /* find session */
-       mutex_lock(&mdsc->mutex);
-       session = __ceph_lookup_mds_session(mdsc, mds);
-       mutex_unlock(&mdsc->mutex);
-       if (!session) {
-               pr_err("handle_lease got lease but no session mds%d\n", mds);
-               return;
-       }
-
        mutex_lock(&session->s_mutex);
        session->s_seq++;
 
@@ -2392,7 +2506,6 @@ release:
 out:
        iput(inode);
        mutex_unlock(&session->s_mutex);
-       ceph_put_mds_session(session);
        return;
 
 bad:
@@ -2415,8 +2528,8 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
        dnamelen = dentry->d_name.len;
        len += dnamelen;
 
-       msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
-       if (IS_ERR(msg))
+       msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len);
+       if (!msg)
                return;
        lease = msg->front.iov_base;
        lease->action = action;
@@ -2561,7 +2674,9 @@ static void delayed_work(struct work_struct *work)
                else
                        ceph_con_keepalive(&s->s_con);
                add_cap_releases(mdsc, s, -1);
-               send_cap_releases(mdsc, s);
+               if (s->s_state == CEPH_MDS_SESSION_OPEN ||
+                   s->s_state == CEPH_MDS_SESSION_HUNG)
+                       send_cap_releases(mdsc, s);
                mutex_unlock(&s->s_mutex);
                ceph_put_mds_session(s);
 
@@ -2578,6 +2693,9 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
        mdsc->client = client;
        mutex_init(&mdsc->mutex);
        mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
+       if (mdsc->mdsmap == NULL)
+               return -ENOMEM;
+
        init_completion(&mdsc->safe_umount_waiters);
        init_completion(&mdsc->session_close_waiters);
        INIT_LIST_HEAD(&mdsc->waiting_for_map);
@@ -2585,11 +2703,11 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
        mdsc->max_sessions = 0;
        mdsc->stopping = 0;
        init_rwsem(&mdsc->snap_rwsem);
-       INIT_RADIX_TREE(&mdsc->snap_realms, GFP_NOFS);
+       mdsc->snap_realms = RB_ROOT;
        INIT_LIST_HEAD(&mdsc->snap_empty);
        spin_lock_init(&mdsc->snap_empty_lock);
        mdsc->last_tid = 0;
-       INIT_RADIX_TREE(&mdsc->request_tree, GFP_NOFS);
+       mdsc->request_tree = RB_ROOT;
        INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
        mdsc->last_renew_caps = jiffies;
        INIT_LIST_HEAD(&mdsc->cap_delay_list);
@@ -2603,6 +2721,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
        init_waitqueue_head(&mdsc->cap_flushing_wq);
        spin_lock_init(&mdsc->dentry_lru_lock);
        INIT_LIST_HEAD(&mdsc->dentry_lru);
+
        return 0;
 }
 
@@ -2616,20 +2735,19 @@ static void wait_requests(struct ceph_mds_client *mdsc)
        struct ceph_client *client = mdsc->client;
 
        mutex_lock(&mdsc->mutex);
-       if (__get_oldest_tid(mdsc)) {
+       if (__get_oldest_req(mdsc)) {
                mutex_unlock(&mdsc->mutex);
+
                dout("wait_requests waiting for requests\n");
                wait_for_completion_timeout(&mdsc->safe_umount_waiters,
                                    client->mount_args->mount_timeout * HZ);
-               mutex_lock(&mdsc->mutex);
 
                /* tear down remaining requests */
-               while (radix_tree_gang_lookup(&mdsc->request_tree,
-                                             (void **)&req, 0, 1)) {
+               mutex_lock(&mdsc->mutex);
+               while ((req = __get_oldest_req(mdsc))) {
                        dout("wait_requests timed out on tid %llu\n",
                             req->r_tid);
-                       radix_tree_delete(&mdsc->request_tree, req->r_tid);
-                       ceph_mdsc_put_request(req);
+                       __unregister_request(mdsc, req);
                }
        }
        mutex_unlock(&mdsc->mutex);
@@ -2655,31 +2773,41 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
  */
 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
 {
-       struct ceph_mds_request *req;
-       u64 next_tid = 0;
-       int got;
+       struct ceph_mds_request *req = NULL, *nextreq;
+       struct rb_node *n;
 
        mutex_lock(&mdsc->mutex);
        dout("wait_unsafe_requests want %lld\n", want_tid);
-       while (1) {
-               got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req,
-                                            next_tid, 1);
-               if (!got)
-                       break;
-               if (req->r_tid > want_tid)
-                       break;
-
-               next_tid = req->r_tid + 1;
-               if ((req->r_op & CEPH_MDS_OP_WRITE) == 0)
-                       continue;  /* not a write op */
-
-               ceph_mdsc_get_request(req);
-               mutex_unlock(&mdsc->mutex);
-               dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
-                    req->r_tid, want_tid);
-               wait_for_completion(&req->r_safe_completion);
-               mutex_lock(&mdsc->mutex);
-               ceph_mdsc_put_request(req);
+restart:
+       req = __get_oldest_req(mdsc);
+       while (req && req->r_tid <= want_tid) {
+               /* find next request */
+               n = rb_next(&req->r_node);
+               if (n)
+                       nextreq = rb_entry(n, struct ceph_mds_request, r_node);
+               else
+                       nextreq = NULL;
+               if ((req->r_op & CEPH_MDS_OP_WRITE)) {
+                       /* write op */
+                       ceph_mdsc_get_request(req);
+                       if (nextreq)
+                               ceph_mdsc_get_request(nextreq);
+                       mutex_unlock(&mdsc->mutex);
+                       dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
+                            req->r_tid, want_tid);
+                       wait_for_completion(&req->r_safe_completion);
+                       mutex_lock(&mdsc->mutex);
+                       ceph_mdsc_put_request(req);
+                       if (!nextreq)
+                               break;  /* no next req existed before we slept, so we're done */
+                       if (RB_EMPTY_NODE(&nextreq->r_node)) {
+                               /* next request was removed from tree */
+                               ceph_mdsc_put_request(nextreq);
+                               goto restart;
+                       }
+                       ceph_mdsc_put_request(nextreq);  /* won't go away */
+               }
+               req = nextreq;
        }
        mutex_unlock(&mdsc->mutex);
        dout("wait_unsafe_requests done\n");
@@ -2689,6 +2817,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 {
        u64 want_tid, want_flush;
 
+       if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+               return;
+
        dout("sync\n");
        mutex_lock(&mdsc->mutex);
        want_tid = mdsc->last_tid;
@@ -2752,7 +2883,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
        for (i = 0; i < mdsc->max_sessions; i++) {
                if (mdsc->sessions[i]) {
                        session = get_session(mdsc->sessions[i]);
-                       unregister_session(mdsc, session);
+                       __unregister_session(mdsc, session);
                        mutex_unlock(&mdsc->mutex);
                        mutex_lock(&session->s_mutex);
                        remove_session_caps(session);
@@ -2849,8 +2980,7 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
        struct ceph_mds_session *s = con->private;
 
        if (get_session(s)) {
-               dout("mdsc con_get %p %d -> %d\n", s,
-                    atomic_read(&s->s_ref) - 1, atomic_read(&s->s_ref));
+               dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
                return con;
        }
        dout("mdsc con_get %p FAIL\n", s);
@@ -2861,9 +2991,8 @@ static void con_put(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
-       dout("mdsc con_put %p %d -> %d\n", s, atomic_read(&s->s_ref),
-            atomic_read(&s->s_ref) - 1);
        ceph_put_mds_session(s);
+       dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
 }
 
 /*
@@ -2873,9 +3002,10 @@ static void con_put(struct ceph_connection *con)
 static void peer_reset(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
+       struct ceph_mds_client *mdsc = s->s_mdsc;
 
-       pr_err("mds%d gave us the boot.  IMPLEMENT RECONNECT.\n",
-              s->s_mds);
+       pr_warning("mds%d closed our session\n", s->s_mds);
+       send_mds_reconnect(mdsc, s);
 }
 
 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
@@ -2884,6 +3014,13 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
        struct ceph_mds_client *mdsc = s->s_mdsc;
        int type = le16_to_cpu(msg->hdr.type);
 
+       mutex_lock(&mdsc->mutex);
+       if (__verify_registered_session(mdsc, s) < 0) {
+               mutex_unlock(&mdsc->mutex);
+               goto out;
+       }
+       mutex_unlock(&mdsc->mutex);
+
        switch (type) {
        case CEPH_MSG_MDS_MAP:
                ceph_mdsc_handle_map(mdsc, msg);
@@ -2895,22 +3032,23 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
                handle_reply(s, msg);
                break;
        case CEPH_MSG_CLIENT_REQUEST_FORWARD:
-               handle_forward(mdsc, msg);
+               handle_forward(mdsc, s, msg);
                break;
        case CEPH_MSG_CLIENT_CAPS:
                ceph_handle_caps(s, msg);
                break;
        case CEPH_MSG_CLIENT_SNAP:
-               ceph_handle_snap(mdsc, msg);
+               ceph_handle_snap(mdsc, s, msg);
                break;
        case CEPH_MSG_CLIENT_LEASE:
-               handle_lease(mdsc, msg);
+               handle_lease(mdsc, s, msg);
                break;
 
        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
+out:
        ceph_msg_put(msg);
 }
 
@@ -2962,15 +3100,26 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
        return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
 }
 
+static int invalidate_authorizer(struct ceph_connection *con)
+{
+       struct ceph_mds_session *s = con->private;
+       struct ceph_mds_client *mdsc = s->s_mdsc;
+       struct ceph_auth_client *ac = mdsc->client->monc.auth;
+
+       if (ac->ops->invalidate_authorizer)
+               ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
+
+       return ceph_monc_validate_auth(&mdsc->client->monc);
+}
+
 const static struct ceph_connection_operations mds_con_ops = {
        .get = con_get,
        .put = con_put,
        .dispatch = dispatch,
        .get_authorizer = get_authorizer,
        .verify_authorizer_reply = verify_authorizer_reply,
+       .invalidate_authorizer = invalidate_authorizer,
        .peer_reset = peer_reset,
-       .alloc_msg = ceph_alloc_msg,
-       .alloc_middle = ceph_alloc_middle,
 };