2 * linux/net/sunrpc/svcsock.c
4 * These are the RPC server socket internals.
6 * The server scheduling algorithm does not always distribute the load
7 * evenly when servicing a single client. May need to modify the
8 * svc_sock_enqueue procedure...
10 * TCP support is largely untested and may be a little slow. The problem
11 * is that we currently do two separate recvfrom's, one for the 4-byte
12 * record length, and the second for the actual record. This could possibly
13 * be improved by always reading a minimum size of around 100 bytes and
14 * tucking any superfluous bytes away in a temporary store. Still, that
15 * leaves write requests out in the rain. An alternative may be to peek at
16 * the first skb in the queue, and if it matches the next TCP sequence
17 * number, to extract the record marker. Yuck.
19 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
22 #include <linux/sched.h>
23 #include <linux/errno.h>
24 #include <linux/fcntl.h>
25 #include <linux/net.h>
27 #include <linux/inet.h>
28 #include <linux/udp.h>
29 #include <linux/tcp.h>
30 #include <linux/unistd.h>
31 #include <linux/slab.h>
32 #include <linux/netdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/file.h>
36 #include <net/checksum.h>
38 #include <net/tcp_states.h>
39 #include <asm/uaccess.h>
40 #include <asm/ioctls.h>
42 #include <linux/sunrpc/types.h>
43 #include <linux/sunrpc/xdr.h>
44 #include <linux/sunrpc/svcsock.h>
45 #include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_serv->sv_lock protects most of the fields of that service.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_BUSY  can be set to 0 at any time.
 *		svc_sock_enqueue must be called afterwards
 *	SK_CONN, SK_DATA, can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time.  It is never cleared.
 */
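/*
 * An illustrative sketch of the SK_BUSY rule above; this is exactly
 * what svc_sock_received() below does after a read attempt:
 *
 *	clear_bit(SK_BUSY, &svsk->sk_flags);
 *	svc_sock_enqueue(svsk);
 *
 * so that any data which arrived while the thread was busy gets the
 * socket queued again.
 */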
64 #define RPCDBG_FACILITY RPCDBG_SVCSOCK
67 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
68 int *errp, int pmap_reg);
69 static void svc_udp_data_ready(struct sock *, int);
70 static int svc_udp_recvfrom(struct svc_rqst *);
71 static int svc_udp_sendto(struct svc_rqst *);
73 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
74 static int svc_deferred_recv(struct svc_rqst *rqstp);
75 static struct cache_deferred_req *svc_defer(struct cache_req *req);
/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
/*
 * Queue up an idle server thread.  Must have serv->sv_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
91 svc_serv_enqueue(struct svc_serv *serv, struct svc_rqst *rqstp)
93 list_add(&rqstp->rq_list, &serv->sv_threads);
97 * Dequeue an nfsd thread. Must have serv->sv_lock held.
100 svc_serv_dequeue(struct svc_serv *serv, struct svc_rqst *rqstp)
102 list_del(&rqstp->rq_list);
106 * Release an skbuff after use
109 svc_release_skb(struct svc_rqst *rqstp)
111 struct sk_buff *skb = rqstp->rq_skbuff;
112 struct svc_deferred_req *dr = rqstp->rq_deferred;
115 rqstp->rq_skbuff = NULL;
117 dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
118 skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
121 rqstp->rq_deferred = NULL;
127 * Any space to write?
129 static inline unsigned long
130 svc_sock_wspace(struct svc_sock *svsk)
134 if (svsk->sk_sock->type == SOCK_STREAM)
135 wspace = sk_stream_wspace(svsk->sk_sk);
137 wspace = sock_wspace(svsk->sk_sk);
143 * Queue up a socket with data pending. If there are idle nfsd
144 * processes, wake 'em up.
148 svc_sock_enqueue(struct svc_sock *svsk)
150 struct svc_serv *serv = svsk->sk_server;
151 struct svc_rqst *rqstp;
153 if (!(svsk->sk_flags &
154 ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
156 if (test_bit(SK_DEAD, &svsk->sk_flags))
159 spin_lock_bh(&serv->sv_lock);
161 if (!list_empty(&serv->sv_threads) &&
162 !list_empty(&serv->sv_sockets))
164 "svc_sock_enqueue: threads and sockets both waiting??\n");
166 if (test_bit(SK_DEAD, &svsk->sk_flags)) {
167 /* Don't enqueue dead sockets */
168 dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
172 if (test_bit(SK_BUSY, &svsk->sk_flags)) {
173 /* Don't enqueue socket while daemon is receiving */
174 dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
178 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
179 if (((svsk->sk_reserved + serv->sv_bufsz)*2
180 > svc_sock_wspace(svsk))
181 && !test_bit(SK_CLOSE, &svsk->sk_flags)
182 && !test_bit(SK_CONN, &svsk->sk_flags)) {
183 /* Don't enqueue while not enough space for reply */
184 dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
185 svsk->sk_sk, svsk->sk_reserved+serv->sv_bufsz,
186 svc_sock_wspace(svsk));
189 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
/* Mark socket as busy. It will remain in this state until the
 * server has processed all pending data and put the socket back
 * on the idle list.
 */
195 set_bit(SK_BUSY, &svsk->sk_flags);
197 if (!list_empty(&serv->sv_threads)) {
198 rqstp = list_entry(serv->sv_threads.next,
201 dprintk("svc: socket %p served by daemon %p\n",
203 svc_serv_dequeue(serv, rqstp);
206 "svc_sock_enqueue: server %p, rq_sock=%p!\n",
207 rqstp, rqstp->rq_sock);
208 rqstp->rq_sock = svsk;
210 rqstp->rq_reserved = serv->sv_bufsz;
211 svsk->sk_reserved += rqstp->rq_reserved;
212 wake_up(&rqstp->rq_wait);
214 dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
215 list_add_tail(&svsk->sk_ready, &serv->sv_sockets);
219 spin_unlock_bh(&serv->sv_lock);
223 * Dequeue the first socket. Must be called with the serv->sv_lock held.
225 static inline struct svc_sock *
226 svc_sock_dequeue(struct svc_serv *serv)
228 struct svc_sock *svsk;
230 if (list_empty(&serv->sv_sockets))
233 svsk = list_entry(serv->sv_sockets.next,
234 struct svc_sock, sk_ready);
235 list_del_init(&svsk->sk_ready);
237 dprintk("svc: socket %p dequeued, inuse=%d\n",
238 svsk->sk_sk, svsk->sk_inuse);
244 * Having read something from a socket, check whether it
245 * needs to be re-enqueued.
246 * Note: SK_DATA only gets cleared when a read-attempt finds
247 * no (or insufficient) data.
250 svc_sock_received(struct svc_sock *svsk)
252 clear_bit(SK_BUSY, &svsk->sk_flags);
253 svc_sock_enqueue(svsk);
258 * svc_reserve - change the space reserved for the reply to a request.
259 * @rqstp: The request in question
260 * @space: new max space to reserve
262 * Each request reserves some space on the output queue of the socket
263 * to make sure the reply fits. This function reduces that reserved
264 * space to be the amount of space used already, plus @space.
267 void svc_reserve(struct svc_rqst *rqstp, int space)
269 space += rqstp->rq_res.head[0].iov_len;
271 if (space < rqstp->rq_reserved) {
272 struct svc_sock *svsk = rqstp->rq_sock;
273 spin_lock_bh(&svsk->sk_server->sv_lock);
274 svsk->sk_reserved -= (rqstp->rq_reserved - space);
275 rqstp->rq_reserved = space;
276 spin_unlock_bh(&svsk->sk_server->sv_lock);
278 svc_sock_enqueue(svsk);
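/*
 * Typical use (an illustrative sketch): a service routine that knows
 * its reply needs at most another 512 bytes can return the rest of its
 * reservation early:
 *
 *	svc_reserve(rqstp, 512);
 *
 * This file itself calls svc_reserve(rqstp, 0) in svc_sock_release()
 * to drop the reservation once the reply has been sent.
 */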
283 * Release a socket after use.
286 svc_sock_put(struct svc_sock *svsk)
288 struct svc_serv *serv = svsk->sk_server;
290 spin_lock_bh(&serv->sv_lock);
291 if (!--(svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
292 spin_unlock_bh(&serv->sv_lock);
293 dprintk("svc: releasing dead socket\n");
294 sock_release(svsk->sk_sock);
298 spin_unlock_bh(&serv->sv_lock);
302 svc_sock_release(struct svc_rqst *rqstp)
304 struct svc_sock *svsk = rqstp->rq_sock;
306 svc_release_skb(rqstp);
308 svc_free_allpages(rqstp);
309 rqstp->rq_res.page_len = 0;
310 rqstp->rq_res.page_base = 0;
/* Reset response buffer and release
 * the reservation.
 * But first, check that enough space was reserved
 * for the reply, otherwise we have a bug!
 */
318 if ((rqstp->rq_res.len) > rqstp->rq_reserved)
319 printk(KERN_ERR "RPC request reserved %d but used %d\n",
323 rqstp->rq_res.head[0].iov_len = 0;
324 svc_reserve(rqstp, 0);
325 rqstp->rq_sock = NULL;
331 * External function to wake up a server waiting for data
334 svc_wake_up(struct svc_serv *serv)
336 struct svc_rqst *rqstp;
338 spin_lock_bh(&serv->sv_lock);
339 if (!list_empty(&serv->sv_threads)) {
340 rqstp = list_entry(serv->sv_threads.next,
343 dprintk("svc: daemon %p woken up.\n", rqstp);
345 svc_serv_dequeue(serv, rqstp);
346 rqstp->rq_sock = NULL;
348 wake_up(&rqstp->rq_wait);
350 spin_unlock_bh(&serv->sv_lock);
354 * Generic sendto routine
357 svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
359 struct svc_sock *svsk = rqstp->rq_sock;
360 struct socket *sock = svsk->sk_sock;
362 char buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
363 struct cmsghdr *cmh = (struct cmsghdr *)buffer;
364 struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
368 struct page **ppage = xdr->pages;
369 size_t base = xdr->page_base;
370 unsigned int pglen = xdr->page_len;
371 unsigned int flags = MSG_MORE;
375 if (rqstp->rq_prot == IPPROTO_UDP) {
376 /* set the source and destination */
378 msg.msg_name = &rqstp->rq_addr;
379 msg.msg_namelen = sizeof(rqstp->rq_addr);
382 msg.msg_flags = MSG_MORE;
384 msg.msg_control = cmh;
385 msg.msg_controllen = sizeof(buffer);
386 cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
387 cmh->cmsg_level = SOL_IP;
388 cmh->cmsg_type = IP_PKTINFO;
389 pki->ipi_ifindex = 0;
390 pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;
392 if (sock_sendmsg(sock, &msg, 0) < 0)
397 if (slen == xdr->head[0].iov_len)
399 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
400 if (len != xdr->head[0].iov_len)
402 slen -= xdr->head[0].iov_len;
407 size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
411 result = kernel_sendpage(sock, *ppage, base, size, flags);
418 size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
423 if (xdr->tail[0].iov_len) {
424 result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
425 ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1),
426 xdr->tail[0].iov_len, 0);
432 dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
433 rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
434 rqstp->rq_addr.sin_addr.s_addr);
440 * Report socket names for nfsdfs
442 static int one_sock_name(char *buf, struct svc_sock *svsk)
446 switch(svsk->sk_sk->sk_family) {
448 len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
449 svsk->sk_sk->sk_protocol==IPPROTO_UDP?
451 NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
452 inet_sk(svsk->sk_sk)->num);
455 len = sprintf(buf, "*unknown-%d*\n",
456 svsk->sk_sk->sk_family);
462 svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
464 struct svc_sock *svsk, *closesk = NULL;
469 spin_lock(&serv->sv_lock);
470 list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
471 int onelen = one_sock_name(buf+len, svsk);
472 if (toclose && strcmp(toclose, buf+len) == 0)
477 spin_unlock(&serv->sv_lock);
479 svc_delete_socket(closesk);
482 EXPORT_SYMBOL(svc_sock_names);
485 * Check input queue length
488 svc_recv_available(struct svc_sock *svsk)
490 struct socket *sock = svsk->sk_sock;
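	/* TIOCINQ reports the number of bytes queued in the receive buffer */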
493 err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);
495 return (err >= 0)? avail : err;
499 * Generic recvfrom routine.
502 svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
508 rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
509 sock = rqstp->rq_sock->sk_sock;
511 msg.msg_name = &rqstp->rq_addr;
512 msg.msg_namelen = sizeof(rqstp->rq_addr);
513 msg.msg_control = NULL;
514 msg.msg_controllen = 0;
516 msg.msg_flags = MSG_DONTWAIT;
518 len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);
520 /* sock_recvmsg doesn't fill in the name/namelen, so we must..
521 * possibly we should cache this in the svc_sock structure
522 * at accept time. FIXME
524 alen = sizeof(rqstp->rq_addr);
525 kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen);
527 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
528 rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);
534 * Set socket snd and rcv buffer lengths
537 svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
541 oldfs = get_fs(); set_fs(KERNEL_DS);
542 sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
543 (char*)&snd, sizeof(snd));
544 sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
545 (char*)&rcv, sizeof(rcv));
547 /* sock_setsockopt limits use to sysctl_?mem_max,
548 * which isn't acceptable. Until that is made conditional
549 * on not having CAP_SYS_RESOURCE or similar, we go direct...
550 * DaveM said I could!
553 sock->sk->sk_sndbuf = snd * 2;
554 sock->sk->sk_rcvbuf = rcv * 2;
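	/* the factor of two above mirrors what SO_SNDBUF/SO_RCVBUF do:
	 * sock_setsockopt() doubles the requested value to leave room
	 * for bookkeeping overhead */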
555 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
556 release_sock(sock->sk);
560 * INET callback when data has been received on the socket.
563 svc_udp_data_ready(struct sock *sk, int count)
565 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
568 dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
569 svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
570 set_bit(SK_DATA, &svsk->sk_flags);
571 svc_sock_enqueue(svsk);
573 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
574 wake_up_interruptible(sk->sk_sleep);
578 * INET callback when space is newly available on the socket.
581 svc_write_space(struct sock *sk)
583 struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
586 dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
587 svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
588 svc_sock_enqueue(svsk);
591 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
592 dprintk("RPC svc_write_space: someone sleeping on %p\n",
594 wake_up_interruptible(sk->sk_sleep);
599 * Receive a datagram from a UDP socket.
602 svc_udp_recvfrom(struct svc_rqst *rqstp)
604 struct svc_sock *svsk = rqstp->rq_sock;
605 struct svc_serv *serv = svsk->sk_server;
609 if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
610 /* udp sockets need large rcvbuf as all pending
611 * requests are still in that buffer. sndbuf must
612 * also be large enough that there is enough space
613 * for one reply per thread.
615 svc_sock_setbufsize(svsk->sk_sock,
616 (serv->sv_nrthreads+3) * serv->sv_bufsz,
617 (serv->sv_nrthreads+3) * serv->sv_bufsz);
619 if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
620 svc_sock_received(svsk);
621 return svc_deferred_recv(rqstp);
624 clear_bit(SK_DATA, &svsk->sk_flags);
625 while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
626 if (err == -EAGAIN) {
627 svc_sock_received(svsk);
630 /* possibly an icmp error */
631 dprintk("svc: recvfrom returned error %d\n", -err);
633 if (skb->tstamp.off_sec == 0) {
636 tv.tv_sec = xtime.tv_sec;
637 tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
638 skb_set_timestamp(skb, &tv);
639 /* Don't enable netstamp, sunrpc doesn't
640 need that much accuracy */
642 skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
643 set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */
646 * Maybe more packets - kick another thread ASAP.
648 svc_sock_received(svsk);
650 len = skb->len - sizeof(struct udphdr);
651 rqstp->rq_arg.len = len;
653 rqstp->rq_prot = IPPROTO_UDP;
655 /* Get sender address */
656 rqstp->rq_addr.sin_family = AF_INET;
657 rqstp->rq_addr.sin_port = skb->h.uh->source;
658 rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
659 rqstp->rq_daddr = skb->nh.iph->daddr;
661 if (skb_is_nonlinear(skb)) {
662 /* we have to copy */
664 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
667 skb_free_datagram(svsk->sk_sk, skb);
671 skb_free_datagram(svsk->sk_sk, skb);
673 /* we can use it in-place */
674 rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
675 rqstp->rq_arg.head[0].iov_len = len;
676 if (skb_checksum_complete(skb)) {
677 skb_free_datagram(svsk->sk_sk, skb);
680 rqstp->rq_skbuff = skb;
683 rqstp->rq_arg.page_base = 0;
684 if (len <= rqstp->rq_arg.head[0].iov_len) {
685 rqstp->rq_arg.head[0].iov_len = len;
686 rqstp->rq_arg.page_len = 0;
688 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
689 rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
693 serv->sv_stats->netudpcnt++;
699 svc_udp_sendto(struct svc_rqst *rqstp)
703 error = svc_sendto(rqstp, &rqstp->rq_res);
704 if (error == -ECONNREFUSED)
705 /* ICMP error on earlier request. */
706 error = svc_sendto(rqstp, &rqstp->rq_res);
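		/* the queued ICMP error is consumed (and reported) by the
		 * first sendmsg call, so a single retry is enough */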
712 svc_udp_init(struct svc_sock *svsk)
714 svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
715 svsk->sk_sk->sk_write_space = svc_write_space;
716 svsk->sk_recvfrom = svc_udp_recvfrom;
717 svsk->sk_sendto = svc_udp_sendto;
/* The initial setting must leave enough space to
 * receive and respond to one request;
 * svc_udp_recvfrom will re-adjust if necessary.
 */
723 svc_sock_setbufsize(svsk->sk_sock,
724 3 * svsk->sk_server->sv_bufsz,
725 3 * svsk->sk_server->sv_bufsz);
727 set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
728 set_bit(SK_CHNGBUF, &svsk->sk_flags);
732 * A data_ready event on a listening socket means there's a connection
733 * pending. Do not use state_change as a substitute for it.
736 svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
738 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
740 dprintk("svc: socket %p TCP (listen) state change %d\n",
/*
 * This callback may be called twice when a new connection
 * is established as a child socket inherits everything
 * from a parent LISTEN socket.
 * 1) data_ready method of the parent socket will be called
 *    when one of the child sockets becomes ESTABLISHED.
 * 2) data_ready method of the child socket may be called
 *    when it receives data before the socket is accepted.
 * In case 2, we should ignore it silently.
 */
753 if (sk->sk_state == TCP_LISTEN) {
755 set_bit(SK_CONN, &svsk->sk_flags);
756 svc_sock_enqueue(svsk);
758 printk("svc: socket %p: no user data\n", sk);
761 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
762 wake_up_interruptible_all(sk->sk_sleep);
766 * A state change on a connected socket means it's dying or dead.
769 svc_tcp_state_change(struct sock *sk)
771 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
773 dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
774 sk, sk->sk_state, sk->sk_user_data);
777 printk("svc: socket %p: no user data\n", sk);
779 set_bit(SK_CLOSE, &svsk->sk_flags);
780 svc_sock_enqueue(svsk);
782 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
783 wake_up_interruptible_all(sk->sk_sleep);
787 svc_tcp_data_ready(struct sock *sk, int count)
789 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
791 dprintk("svc: socket %p TCP data ready (svsk %p)\n",
792 sk, sk->sk_user_data);
794 set_bit(SK_DATA, &svsk->sk_flags);
795 svc_sock_enqueue(svsk);
797 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
798 wake_up_interruptible(sk->sk_sleep);
802 * Accept a TCP connection
805 svc_tcp_accept(struct svc_sock *svsk)
807 struct sockaddr_in sin;
808 struct svc_serv *serv = svsk->sk_server;
809 struct socket *sock = svsk->sk_sock;
810 struct socket *newsock;
811 struct svc_sock *newsvsk;
814 dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
818 clear_bit(SK_CONN, &svsk->sk_flags);
819 err = kernel_accept(sock, &newsock, O_NONBLOCK);
822 printk(KERN_WARNING "%s: no more sockets!\n",
824 else if (err != -EAGAIN && net_ratelimit())
825 printk(KERN_WARNING "%s: accept failed (err %d)!\n",
826 serv->sv_name, -err);
830 set_bit(SK_CONN, &svsk->sk_flags);
831 svc_sock_enqueue(svsk);
834 err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen);
837 printk(KERN_WARNING "%s: peername failed (err %d)!\n",
838 serv->sv_name, -err);
839 goto failed; /* aborted connection or whatever */
/* Ideally, we would want to reject connections from unauthorized
 * hosts here, but when we get encryption, the IP of the host won't
 * tell us anything.  For now just warn about unprivileged connections.
 */
846 if (ntohs(sin.sin_port) >= 1024) {
848 "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
850 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
853 dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
854 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
/* make sure that a write doesn't block forever when
 * low on memory
 */
newsock->sk->sk_sndtimeo = HZ*30;
861 if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
/* make sure that we don't have too many active connections.
 * If we have, something must be dropped.
 *
 * There's no point in trying to do random drop here for
 * DoS prevention.  NFS clients do one reconnect in 15
 * seconds.  An attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop
 * old connections from the same IP first.  But right now
 * we don't even record the client IP in svc_sock.
 */
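/* For scale: with 8 server threads, the limit below works out to
 * (8+3)*20 == 220 temporary sockets. */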
876 if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
877 struct svc_sock *svsk = NULL;
878 spin_lock_bh(&serv->sv_lock);
879 if (!list_empty(&serv->sv_tempsocks)) {
880 if (net_ratelimit()) {
881 /* Try to help the admin */
882 printk(KERN_NOTICE "%s: too many open TCP "
883 "sockets, consider increasing the "
884 "number of nfsd threads\n",
886 printk(KERN_NOTICE "%s: last TCP connect from "
889 NIPQUAD(sin.sin_addr.s_addr),
890 ntohs(sin.sin_port));
/*
 * Always select the oldest socket. It's not fair,
 * but so is life.
 */
896 svsk = list_entry(serv->sv_tempsocks.prev,
899 set_bit(SK_CLOSE, &svsk->sk_flags);
902 spin_unlock_bh(&serv->sv_lock);
905 svc_sock_enqueue(svsk);
912 serv->sv_stats->nettcpconn++;
917 sock_release(newsock);
922 * Receive data from a TCP socket.
925 svc_tcp_recvfrom(struct svc_rqst *rqstp)
927 struct svc_sock *svsk = rqstp->rq_sock;
928 struct svc_serv *serv = svsk->sk_server;
930 struct kvec vec[RPCSVC_MAXPAGES];
933 dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
934 svsk, test_bit(SK_DATA, &svsk->sk_flags),
935 test_bit(SK_CONN, &svsk->sk_flags),
936 test_bit(SK_CLOSE, &svsk->sk_flags));
938 if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
939 svc_sock_received(svsk);
940 return svc_deferred_recv(rqstp);
943 if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
944 svc_delete_socket(svsk);
948 if (test_bit(SK_CONN, &svsk->sk_flags)) {
949 svc_tcp_accept(svsk);
950 svc_sock_received(svsk);
954 if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
/* sndbuf needs to have room for one request
 * per thread, otherwise we can stall even when the
 * network isn't a bottleneck.
 *
 * rcvbuf just needs to be able to hold a few requests.
 * Normally they will be removed from the queue
 * as soon as a complete request arrives.
 */
962 svc_sock_setbufsize(svsk->sk_sock,
963 (serv->sv_nrthreads+3) * serv->sv_bufsz,
966 clear_bit(SK_DATA, &svsk->sk_flags);
968 /* Receive data. If we haven't got the record length yet, get
969 * the next four bytes. Otherwise try to gobble up as much as
970 * possible up to the complete record length.
972 if (svsk->sk_tcplen < 4) {
973 unsigned long want = 4 - svsk->sk_tcplen;
976 iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
978 if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
980 svsk->sk_tcplen += len;
983 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
985 svc_sock_received(svsk);
986 return -EAGAIN; /* record header not complete */
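	/*
	 * RPC over TCP record marking (RFC 1831): every record is preceded
	 * by four bytes in network byte order; the top bit flags the last
	 * fragment of a record and the low 31 bits give the fragment
	 * length.  For example, a marker of 0x80000064 means "final
	 * fragment, 100 bytes follow".
	 */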
989 svsk->sk_reclen = ntohl(svsk->sk_reclen);
990 if (!(svsk->sk_reclen & 0x80000000)) {
/* FIXME: technically, a record can be fragmented,
 * and non-terminal fragments will not have the top
 * bit set in the fragment length header.
 * But apparently no known nfs clients send fragmented
 * records.
 */
996 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n",
997 (unsigned long) svsk->sk_reclen);
1000 svsk->sk_reclen &= 0x7fffffff;
1001 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
1002 if (svsk->sk_reclen > serv->sv_bufsz) {
1003 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
1004 (unsigned long) svsk->sk_reclen);
1009 /* Check whether enough data is available */
1010 len = svc_recv_available(svsk);
1014 if (len < svsk->sk_reclen) {
1015 dprintk("svc: incomplete TCP record (%d of %d)\n",
1016 len, svsk->sk_reclen);
1017 svc_sock_received(svsk);
1018 return -EAGAIN; /* record not complete */
1020 len = svsk->sk_reclen;
1021 set_bit(SK_DATA, &svsk->sk_flags);
1023 vec[0] = rqstp->rq_arg.head[0];
1026 while (vlen < len) {
1027 vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
1028 vec[pnum].iov_len = PAGE_SIZE;
1033 /* Now receive data */
1034 len = svc_recvfrom(rqstp, vec, pnum, len);
1038 dprintk("svc: TCP complete record (%d bytes)\n", len);
1039 rqstp->rq_arg.len = len;
1040 rqstp->rq_arg.page_base = 0;
1041 if (len <= rqstp->rq_arg.head[0].iov_len) {
1042 rqstp->rq_arg.head[0].iov_len = len;
1043 rqstp->rq_arg.page_len = 0;
1045 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
1048 rqstp->rq_skbuff = NULL;
1049 rqstp->rq_prot = IPPROTO_TCP;
1051 /* Reset TCP read info */
1052 svsk->sk_reclen = 0;
1053 svsk->sk_tcplen = 0;
1055 svc_sock_received(svsk);
1057 serv->sv_stats->nettcpcnt++;
1062 svc_delete_socket(svsk);
1066 if (len == -EAGAIN) {
1067 dprintk("RPC: TCP recvfrom got EAGAIN\n");
1068 svc_sock_received(svsk);
1070 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
1071 svsk->sk_server->sv_name, -len);
1079 * Send out data on TCP socket.
1082 svc_tcp_sendto(struct svc_rqst *rqstp)
1084 struct xdr_buf *xbufp = &rqstp->rq_res;
1088 /* Set up the first element of the reply kvec.
1089 * Any other kvecs that may be in use have been taken
1090 * care of by the server implementation itself.
1092 reclen = htonl(0x80000000|((xbufp->len ) - 4));
1093 memcpy(xbufp->head[0].iov_base, &reclen, 4);
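	/* those four bytes are the RPC record marker: top bit = last
	 * fragment, low 31 bits = record length, which is why 4 is
	 * subtracted from xbufp->len when computing reclen above */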
1095 if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
1098 sent = svc_sendto(rqstp, &rqstp->rq_res);
1099 if (sent != xbufp->len) {
1100 printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
1101 rqstp->rq_sock->sk_server->sv_name,
1102 (sent<0)?"got error":"sent only",
1104 svc_delete_socket(rqstp->rq_sock);
1111 svc_tcp_init(struct svc_sock *svsk)
1113 struct sock *sk = svsk->sk_sk;
1114 struct tcp_sock *tp = tcp_sk(sk);
1116 svsk->sk_recvfrom = svc_tcp_recvfrom;
1117 svsk->sk_sendto = svc_tcp_sendto;
1119 if (sk->sk_state == TCP_LISTEN) {
1120 dprintk("setting up TCP socket for listening\n");
1121 sk->sk_data_ready = svc_tcp_listen_data_ready;
1122 set_bit(SK_CONN, &svsk->sk_flags);
1124 dprintk("setting up TCP socket for reading\n");
1125 sk->sk_state_change = svc_tcp_state_change;
1126 sk->sk_data_ready = svc_tcp_data_ready;
1127 sk->sk_write_space = svc_write_space;
1129 svsk->sk_reclen = 0;
1130 svsk->sk_tcplen = 0;
1132 tp->nonagle = 1; /* disable Nagle's algorithm */
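	/* each reply is written as one complete record, so Nagle could
	 * only delay it; there is nothing useful to coalesce */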
/* The initial setting must leave enough space to
 * receive and respond to one request;
 * svc_tcp_recvfrom will re-adjust if necessary.
 */
1138 svc_sock_setbufsize(svsk->sk_sock,
1139 3 * svsk->sk_server->sv_bufsz,
1140 3 * svsk->sk_server->sv_bufsz);
1142 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1143 set_bit(SK_DATA, &svsk->sk_flags);
1144 if (sk->sk_state != TCP_ESTABLISHED)
1145 set_bit(SK_CLOSE, &svsk->sk_flags);
1150 svc_sock_update_bufs(struct svc_serv *serv)
1153 * The number of server threads has changed. Update
1154 * rcvbuf and sndbuf accordingly on all sockets
1156 struct list_head *le;
1158 spin_lock_bh(&serv->sv_lock);
1159 list_for_each(le, &serv->sv_permsocks) {
1160 struct svc_sock *svsk =
1161 list_entry(le, struct svc_sock, sk_list);
1162 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1164 list_for_each(le, &serv->sv_tempsocks) {
1165 struct svc_sock *svsk =
1166 list_entry(le, struct svc_sock, sk_list);
1167 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1169 spin_unlock_bh(&serv->sv_lock);
1173 * Receive the next request on any socket.
1176 svc_recv(struct svc_rqst *rqstp, long timeout)
1178 struct svc_sock *svsk =NULL;
1179 struct svc_serv *serv = rqstp->rq_server;
1182 struct xdr_buf *arg;
1183 DECLARE_WAITQUEUE(wait, current);
1185 dprintk("svc: server %p waiting for data (to = %ld)\n",
1190 "svc_recv: service %p, socket not NULL!\n",
1192 if (waitqueue_active(&rqstp->rq_wait))
1194 "svc_recv: service %p, wait queue active!\n",
1197 /* Initialize the buffers */
1198 /* first reclaim pages that were moved to response list */
1199 svc_pushback_allpages(rqstp);
1201 /* now allocate needed pages. If we get a failure, sleep briefly */
1202 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
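	/* e.g. with sv_bufsz == 32768 and 4K pages this works out to
	 * 2 + 8 == 10 pages */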
1203 while (rqstp->rq_arghi < pages) {
1204 struct page *p = alloc_page(GFP_KERNEL);
1206 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1209 rqstp->rq_argpages[rqstp->rq_arghi++] = p;
1212 /* Make arg->head point to first page and arg->pages point to rest */
1213 arg = &rqstp->rq_arg;
1214 arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]);
1215 arg->head[0].iov_len = PAGE_SIZE;
1216 rqstp->rq_argused = 1;
1217 arg->pages = rqstp->rq_argpages + 1;
1219 /* save at least one page for response */
1220 arg->page_len = (pages-2)*PAGE_SIZE;
1221 arg->len = (pages-1)*PAGE_SIZE;
1222 arg->tail[0].iov_len = 0;
1229 spin_lock_bh(&serv->sv_lock);
1230 if ((svsk = svc_sock_dequeue(serv)) != NULL) {
1231 rqstp->rq_sock = svsk;
1233 rqstp->rq_reserved = serv->sv_bufsz;
1234 svsk->sk_reserved += rqstp->rq_reserved;
1236 /* No data pending. Go to sleep */
1237 svc_serv_enqueue(serv, rqstp);
1240 * We have to be able to interrupt this wait
1241 * to bring down the daemons ...
1243 set_current_state(TASK_INTERRUPTIBLE);
1244 add_wait_queue(&rqstp->rq_wait, &wait);
1245 spin_unlock_bh(&serv->sv_lock);
1247 schedule_timeout(timeout);
1251 spin_lock_bh(&serv->sv_lock);
1252 remove_wait_queue(&rqstp->rq_wait, &wait);
1254 if (!(svsk = rqstp->rq_sock)) {
1255 svc_serv_dequeue(serv, rqstp);
1256 spin_unlock_bh(&serv->sv_lock);
1257 dprintk("svc: server %p, no data yet\n", rqstp);
1258 return signalled()? -EINTR : -EAGAIN;
1261 spin_unlock_bh(&serv->sv_lock);
1263 dprintk("svc: server %p, socket %p, inuse=%d\n",
1264 rqstp, svsk, svsk->sk_inuse);
1265 len = svsk->sk_recvfrom(rqstp);
1266 dprintk("svc: got len=%d\n", len);
1268 /* No data, incomplete (TCP) read, or accept() */
1269 if (len == 0 || len == -EAGAIN) {
1270 rqstp->rq_res.len = 0;
1271 svc_sock_release(rqstp);
1274 svsk->sk_lastrecv = get_seconds();
1275 clear_bit(SK_OLD, &svsk->sk_flags);
1277 rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
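	/* only root can bind ports below 1024, so a low source port is
	 * taken as evidence of a privileged client */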
1278 rqstp->rq_chandle.defer = svc_defer;
1281 serv->sv_stats->netcnt++;
1289 svc_drop(struct svc_rqst *rqstp)
1291 dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
1292 svc_sock_release(rqstp);
1296 * Return reply to client.
1299 svc_send(struct svc_rqst *rqstp)
1301 struct svc_sock *svsk;
1305 if ((svsk = rqstp->rq_sock) == NULL) {
1306 printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
1307 __FILE__, __LINE__);
1311 /* release the receive skb before sending the reply */
1312 svc_release_skb(rqstp);
1314 /* calculate over-all length */
1315 xb = & rqstp->rq_res;
1316 xb->len = xb->head[0].iov_len +
1318 xb->tail[0].iov_len;
1320 /* Grab svsk->sk_mutex to serialize outgoing data. */
1321 mutex_lock(&svsk->sk_mutex);
1322 if (test_bit(SK_DEAD, &svsk->sk_flags))
1325 len = svsk->sk_sendto(rqstp);
1326 mutex_unlock(&svsk->sk_mutex);
1327 svc_sock_release(rqstp);
1329 if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
1335 * Timer function to close old temporary sockets, using
1336 * a mark-and-sweep algorithm.
1339 svc_age_temp_sockets(unsigned long closure)
1341 struct svc_serv *serv = (struct svc_serv *)closure;
1342 struct svc_sock *svsk;
1343 struct list_head *le, *next;
1344 LIST_HEAD(to_be_aged);
1346 dprintk("svc_age_temp_sockets\n");
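	/*
	 * Mark and sweep: the first pass over a socket merely sets SK_OLD
	 * and leaves it alone; svc_recv() clears SK_OLD whenever the socket
	 * shows activity.  A socket still marked on the next pass has
	 * therefore been idle for a full period and is queued for closing.
	 */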
1348 if (!spin_trylock_bh(&serv->sv_lock)) {
1349 /* busy, try again 1 sec later */
1350 dprintk("svc_age_temp_sockets: busy\n");
1351 mod_timer(&serv->sv_temptimer, jiffies + HZ);
1355 list_for_each_safe(le, next, &serv->sv_tempsocks) {
1356 svsk = list_entry(le, struct svc_sock, sk_list);
1358 if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
1360 if (svsk->sk_inuse || test_bit(SK_BUSY, &svsk->sk_flags))
1363 list_move(le, &to_be_aged);
1364 set_bit(SK_CLOSE, &svsk->sk_flags);
1365 set_bit(SK_DETACHED, &svsk->sk_flags);
1367 spin_unlock_bh(&serv->sv_lock);
1369 while (!list_empty(&to_be_aged)) {
1370 le = to_be_aged.next;
/* fiddling the sk_list node is safe because the socket is SK_DETACHED */
1373 svsk = list_entry(le, struct svc_sock, sk_list);
1375 dprintk("queuing svsk %p for closing, %lu seconds old\n",
1376 svsk, get_seconds() - svsk->sk_lastrecv);
1378 /* a thread will dequeue and close it soon */
1379 svc_sock_enqueue(svsk);
1383 mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
1387 * Initialize socket for RPC use and create svc_sock struct
1388 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
1390 static struct svc_sock *
1391 svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1392 int *errp, int pmap_register)
1394 struct svc_sock *svsk;
1397 dprintk("svc: svc_setup_socket %p\n", sock);
1398 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
1405 /* Register socket with portmapper */
1406 if (*errp >= 0 && pmap_register)
1407 *errp = svc_register(serv, inet->sk_protocol,
1408 ntohs(inet_sk(inet)->sport));
1415 set_bit(SK_BUSY, &svsk->sk_flags);
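	/* SK_BUSY keeps the socket off the ready queue until setup is
	 * complete; it is cleared again at the bottom of this function,
	 * just before svc_sock_enqueue() is called. */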
1416 inet->sk_user_data = svsk;
1417 svsk->sk_sock = sock;
1419 svsk->sk_ostate = inet->sk_state_change;
1420 svsk->sk_odata = inet->sk_data_ready;
1421 svsk->sk_owspace = inet->sk_write_space;
1422 svsk->sk_server = serv;
1423 svsk->sk_lastrecv = get_seconds();
1424 INIT_LIST_HEAD(&svsk->sk_deferred);
1425 INIT_LIST_HEAD(&svsk->sk_ready);
1426 mutex_init(&svsk->sk_mutex);
1428 /* Initialize the socket */
1429 if (sock->type == SOCK_DGRAM)
1434 spin_lock_bh(&serv->sv_lock);
1435 if (!pmap_register) {
1436 set_bit(SK_TEMP, &svsk->sk_flags);
1437 list_add(&svsk->sk_list, &serv->sv_tempsocks);
1439 if (serv->sv_temptimer.function == NULL) {
1440 /* setup timer to age temp sockets */
1441 setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
1442 (unsigned long)serv);
1443 mod_timer(&serv->sv_temptimer,
1444 jiffies + svc_conn_age_period * HZ);
1447 clear_bit(SK_TEMP, &svsk->sk_flags);
1448 list_add(&svsk->sk_list, &serv->sv_permsocks);
1450 spin_unlock_bh(&serv->sv_lock);
1452 dprintk("svc: svc_setup_socket created %p (inet %p)\n",
1455 clear_bit(SK_BUSY, &svsk->sk_flags);
1456 svc_sock_enqueue(svsk);
1460 int svc_addsock(struct svc_serv *serv,
1466 struct socket *so = sockfd_lookup(fd, &err);
1467 struct svc_sock *svsk = NULL;
1471 if (so->sk->sk_family != AF_INET)
1472 err = -EAFNOSUPPORT;
1473 else if (so->sk->sk_protocol != IPPROTO_TCP &&
1474 so->sk->sk_protocol != IPPROTO_UDP)
1475 err = -EPROTONOSUPPORT;
1476 else if (so->state > SS_UNCONNECTED)
1479 svsk = svc_setup_socket(serv, so, &err, 1);
1487 if (proto) *proto = so->sk->sk_protocol;
1488 return one_sock_name(name_return, svsk);
1490 EXPORT_SYMBOL_GPL(svc_addsock);
1493 * Create socket for RPC service.
1496 svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
1498 struct svc_sock *svsk;
1499 struct socket *sock;
1503 dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
1504 serv->sv_program->pg_name, protocol,
1505 NIPQUAD(sin->sin_addr.s_addr),
1506 ntohs(sin->sin_port));
1508 if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
1509 printk(KERN_WARNING "svc: only UDP and TCP "
1510 "sockets supported\n");
1513 type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
1515 if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
1518 if (type == SOCK_STREAM)
1519 sock->sk->sk_reuse = 1; /* allow address reuse */
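		/* without address reuse, rebinding the same port would fail
		 * with EADDRINUSE while old connections sit in TIME_WAIT */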
1520 error = kernel_bind(sock, (struct sockaddr *) sin,
1525 if (protocol == IPPROTO_TCP) {
1526 if ((error = kernel_listen(sock, 64)) < 0)
1530 if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
1534 dprintk("svc: svc_create_socket error = %d\n", -error);
1540 * Remove a dead socket
1543 svc_delete_socket(struct svc_sock *svsk)
1545 struct svc_serv *serv;
1548 dprintk("svc: svc_delete_socket(%p)\n", svsk);
1550 serv = svsk->sk_server;
1553 sk->sk_state_change = svsk->sk_ostate;
1554 sk->sk_data_ready = svsk->sk_odata;
1555 sk->sk_write_space = svsk->sk_owspace;
1557 spin_lock_bh(&serv->sv_lock);
1559 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
1560 list_del_init(&svsk->sk_list);
1561 list_del_init(&svsk->sk_ready);
1562 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
1563 if (test_bit(SK_TEMP, &svsk->sk_flags))
1566 if (!svsk->sk_inuse) {
1567 spin_unlock_bh(&serv->sv_lock);
1568 if (svsk->sk_sock->file)
1569 sockfd_put(svsk->sk_sock);
1571 sock_release(svsk->sk_sock);
1574 spin_unlock_bh(&serv->sv_lock);
1575 dprintk(KERN_NOTICE "svc: server socket destroy delayed\n");
1576 /* svsk->sk_server = NULL; */
1581 * Make a socket for nfsd and lockd
1584 svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
1586 struct sockaddr_in sin;
1588 dprintk("svc: creating socket proto = %d\n", protocol);
1589 sin.sin_family = AF_INET;
1590 sin.sin_addr.s_addr = INADDR_ANY;
1591 sin.sin_port = htons(port);
1592 return svc_create_socket(serv, protocol, &sin);
1596 * Handle defer and revisit of requests
1599 static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
1601 struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
1602 struct svc_serv *serv = dreq->owner;
1603 struct svc_sock *svsk;
1606 svc_sock_put(dr->svsk);
1610 dprintk("revisit queued\n");
1613 spin_lock_bh(&serv->sv_lock);
1614 list_add(&dr->handle.recent, &svsk->sk_deferred);
1615 spin_unlock_bh(&serv->sv_lock);
1616 set_bit(SK_DEFERRED, &svsk->sk_flags);
1617 svc_sock_enqueue(svsk);
1621 static struct cache_deferred_req *
1622 svc_defer(struct cache_req *req)
1624 struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
1625 int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
1626 struct svc_deferred_req *dr;
1628 if (rqstp->rq_arg.page_len)
1629 return NULL; /* if more than a page, give up FIXME */
1630 if (rqstp->rq_deferred) {
1631 dr = rqstp->rq_deferred;
1632 rqstp->rq_deferred = NULL;
1634 int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
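		/* head[0].iov_base has already been advanced past the RPC
		 * header during argument decoding, so the memcpy below backs
		 * up by 'skip' bytes to save the request from its original
		 * start. */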
1635 /* FIXME maybe discard if size too large */
1636 dr = kmalloc(size, GFP_KERNEL);
1640 dr->handle.owner = rqstp->rq_server;
1641 dr->prot = rqstp->rq_prot;
1642 dr->addr = rqstp->rq_addr;
1643 dr->daddr = rqstp->rq_daddr;
1644 dr->argslen = rqstp->rq_arg.len >> 2;
1645 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
1647 spin_lock_bh(&rqstp->rq_server->sv_lock);
1648 rqstp->rq_sock->sk_inuse++;
1649 dr->svsk = rqstp->rq_sock;
1650 spin_unlock_bh(&rqstp->rq_server->sv_lock);
1652 dr->handle.revisit = svc_revisit;
1657 * recv data from a deferred request into an active one
1659 static int svc_deferred_recv(struct svc_rqst *rqstp)
1661 struct svc_deferred_req *dr = rqstp->rq_deferred;
1663 rqstp->rq_arg.head[0].iov_base = dr->args;
1664 rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
1665 rqstp->rq_arg.page_len = 0;
1666 rqstp->rq_arg.len = dr->argslen<<2;
1667 rqstp->rq_prot = dr->prot;
1668 rqstp->rq_addr = dr->addr;
1669 rqstp->rq_daddr = dr->daddr;
1670 return dr->argslen<<2;
1674 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
1676 struct svc_deferred_req *dr = NULL;
1677 struct svc_serv *serv = svsk->sk_server;
1679 if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
1681 spin_lock_bh(&serv->sv_lock);
1682 clear_bit(SK_DEFERRED, &svsk->sk_flags);
1683 if (!list_empty(&svsk->sk_deferred)) {
1684 dr = list_entry(svsk->sk_deferred.next,
1685 struct svc_deferred_req,
1687 list_del_init(&dr->handle.recent);
1688 set_bit(SK_DEFERRED, &svsk->sk_flags);
1690 spin_unlock_bh(&serv->sv_lock);