nfsd: Revert "svcrpc: take advantage of tcp autotuning"
author     J. Bruce Fields <bfields@citi.umich.edu>
           Wed, 27 May 2009 22:51:06 +0000 (18:51 -0400)
committer  J. Bruce Fields <bfields@citi.umich.edu>
           Wed, 27 May 2009 22:51:06 +0000 (18:51 -0400)
This reverts commit 47a14ef1af48c696b214ac168f056ddc79793d0e "svcrpc:
take advantage of tcp autotuning", which uncovered some further problems
in the server rpc code, causing significant performance regressions in
common cases.

We will likely reinstate this patch after 2.6.30 is released, once the
underlying fixes (developed by Trond) have been applied.

Reported-by: Jeff Moyer <jmoyer@redhat.com>
Cc: Olga Kornievskaia <aglo@citi.umich.edu>
Cc: Jim Rees <rees@umich.edu>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index af31988..9d50423 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -345,6 +345,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
        lock_sock(sock->sk);
        sock->sk->sk_sndbuf = snd * 2;
        sock->sk->sk_rcvbuf = rcv * 2;
+       sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
        release_sock(sock->sk);
 #endif
 }
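
The restored sk_userlocks line is what actually switches autotuning back off:
the SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK bits mark the buffer sizes as explicitly
set, so the TCP stack stops moderating them on its own. The same effect can be
seen from userspace whenever SO_SNDBUF/SO_RCVBUF are set with setsockopt(); a
minimal standalone sketch, illustration only and not part of the patch (the
1 MB sizes are arbitrary):

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int snd = 1 << 20, rcv = 1 << 20;	/* arbitrary 1 MB each */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Explicitly setting the buffer sizes makes the kernel mark them
	 * user-locked (SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK), so autotuning no
	 * longer grows or shrinks this socket's buffers; the values are
	 * doubled internally to account for bookkeeping overhead, just as
	 * svc_sock_setbufsize() stores snd * 2 and rcv * 2. */
	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
	return 0;
}
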
@@ -796,6 +797,23 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
                test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
                test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
+       if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
+               /* sndbuf needs to have room for one request
+                * per thread, otherwise we can stall even when the
+                * network isn't a bottleneck.
+                *
+                * We count all threads rather than threads in a
+                * particular pool, which provides an upper bound
+                * on the number of threads which will access the socket.
+                *
+                * rcvbuf just needs to be able to hold a few requests.
+                * Normally they will be removed from the queue
+                * as soon as a complete request arrives.
+                */
+               svc_sock_setbufsize(svsk->sk_sock,
+                                   (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+                                   3 * serv->sv_max_mesg);
+
        clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
        /* Receive data. If we haven't got the record length yet, get
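
To put numbers on the sizing formula restored above, a small standalone sketch
with assumed values (the ~1 MB payload and the thread count are invented for
illustration; sv_max_mesg is roughly the maximum RPC payload plus a page):

#include <stdio.h>

int main(void)
{
	unsigned int sv_max_mesg = (1 << 20) + 4096;	/* assumed ~1 MB payload + one page */
	unsigned int sv_nrthreads = 8;			/* assumed nfsd thread count */

	/* sndbuf: room for one reply per thread, plus some slack */
	unsigned int snd = (sv_nrthreads + 3) * sv_max_mesg;
	/* rcvbuf: a few requests; they are dequeued as they complete */
	unsigned int rcv = 3 * sv_max_mesg;

	printf("sndbuf target: %u bytes (~%u MB)\n", snd, snd >> 20);
	printf("rcvbuf target: %u bytes (~%u MB)\n", rcv, rcv >> 20);
	return 0;
}

svc_sock_setbufsize() then stores twice each of these values in sk_sndbuf and
sk_rcvbuf, matching the kernel's usual overhead accounting.
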
@@ -1043,6 +1061,15 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 
                tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
+               /* initial setting must have enough space to
+                * receive and respond to one request.
+                * svc_tcp_recvfrom will re-adjust if necessary.
+                */
+               svc_sock_setbufsize(svsk->sk_sock,
+                                   3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
+                                   3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
+
+               set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
                set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
                if (sk->sk_state != TCP_ESTABLISHED)
                        set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1112,14 +1139,8 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
        /* Initialize the socket */
        if (sock->type == SOCK_DGRAM)
                svc_udp_init(svsk, serv);
-       else {
-               /* initialise setting must have enough space to
-                * receive and respond to one request.
-                */
-               svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
-                                       4 * serv->sv_max_mesg);
+       else
                svc_tcp_init(svsk, serv);
-       }
 
        dprintk("svc: svc_setup_socket created %p (inet %p)\n",
                                svsk, svsk->sk_sk);