/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted.
 *		If this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	sk_inuse contains a bias of '1' until SK_DEAD is set.
 *		so when sk_inuse hits zero, we know the socket is dead
 *		and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held, which ensures
 *		no other thread will be using the socket or will try to
 *		set SK_DEAD.
 */
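
/*
 * A minimal sketch of the SK_DATA rule described above, using the names
 * from this file (the read step is a hypothetical placeholder, not a
 * function defined here):
 *
 *	set_bit(SK_DATA, &svsk->sk_flags);	   // producer: data arrived
 *	svc_sock_enqueue(svsk);			   // must follow every set
 *
 *	clear_bit(SK_DATA, &svsk->sk_flags);	   // consumer: about to read
 *	if (read_from_socket(svsk) > 0)		   // hypothetical read helper
 *		set_bit(SK_DATA, &svsk->sk_flags); // read succeeded: set again
 */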
#define RPCDBG_FACILITY	RPCDBG_SVCXPRT


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_close_socket(struct svc_sock *svsk);
static void		svc_sock_detach(struct svc_xprt *);
static void		svc_sock_free(struct svc_xprt *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sock_owned_by_user(sk));
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif
static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_INET:
		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
			ntohs(((struct sockaddr_in *) addr)->sin_port));
		break;

	case AF_INET6:
		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
			ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
		break;

	default:
		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
		break;
	}
	return buf;
}

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);
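
/*
 * Example of the two formats produced above (values illustrative only):
 * an AF_INET peer prints as "192.168.0.10, port=702", and an AF_INET6
 * peer as "fe80:0:0:0:2a0:c9ff:fe1e:4ca2, port=702".
 */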
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static void svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_xprt_ctxt = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool	*pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	/* Handle pending connection */
	if (test_bit(SK_CONN, &svsk->sk_flags))
		goto process;

	/* Handle close in-progress */
	if (test_bit(SK_CLOSE, &svsk->sk_flags))
		goto process;

	/* Check if we have space to reply to a request */
	if (!svsk->sk_xprt.xpt_ops->xpo_has_wspace(&svsk->sk_xprt)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: no write space, socket %p not enqueued\n", svsk);
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}

 process:
	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
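
/*
 * Note on the pool choice above: svc_pool_for_cpu() maps the CPU we are
 * currently running on (normally the one that took the network softirq)
 * to a pool, so a socket tends to be served by threads on the CPU or
 * node that received the data; the get_cpu()/put_cpu() pair only pins
 * us long enough to read a stable CPU id.
 */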
/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse)) {
		BUG_ON(!test_bit(SK_DEAD, &svsk->sk_flags));
		svsk->sk_xprt.xpt_ops->xpo_free(&svsk->sk_xprt);
	}
}

static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))
static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
				       &rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
	return;
}
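
/*
 * A sketch of how the SVC_PKTINFO_SPACE buffer filled in above is
 * declared by its callers (the names match svc_sendto() and
 * svc_udp_recvfrom() below):
 *
 *	union {
 *		struct cmsghdr	hdr;
 *		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
 *	} buffer;
 *	struct cmsghdr *cmh = &buffer.hdr;
 *
 * The union guarantees storage large enough for either address
 * family's pktinfo and aligned like a struct cmsghdr.
 */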
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	char		buf[RPC_MAX_ADDRBUFLEN];

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};

		svc_set_cmsg_data(rqstp, cmh);

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
			      xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					 ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					 xdr->tail[0].iov_len, 0);
		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			      svsk->sk_sk->sk_family);
	}
	return len;
}
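
/*
 * For a UDP socket bound to port 2049 on the wildcard address, the line
 * produced above would read (illustrative values):
 *
 *	ipv4 udp 0.0.0.0 2049
 */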
int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	struct sockaddr *sin;
	int len;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
			     msg.msg_flags);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 */
	memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
	rqstp->rq_addrlen = svsk->sk_remotelen;

	/* Destination address in request is needed for binding the
	 * source address in RPC callbacks later.
	 */
	sin = (struct sockaddr *)&svsk->sk_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		svsk, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
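
/*
 * A worked example of the sizing rule the callers below apply, assuming
 * 8 server threads and an sv_max_mesg of 32K (both values illustrative):
 * svc_udp_recvfrom() asks for (8 + 3) * 32K = 352K for both sndbuf and
 * rcvbuf, and svc_sock_setbufsize() stores twice that in
 * sk_sndbuf/sk_rcvbuf, mirroring the doubling SO_SNDBUF/SO_RCVBUF would
 * have applied.
 */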
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
					    struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
		struct in_pktinfo *pki = CMSG_DATA(cmh);
		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
		break;
		}
	case AF_INET6: {
		struct in6_pktinfo *pki = CMSG_DATA(cmh);
		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
		break;
		}
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		err, len;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	skb = NULL;
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err >= 0)
		skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);

	if (skb == NULL) {
		if (err != -EAGAIN) {
			/* possibly an icmp error */
			dprintk("svc: recvfrom returned error %d\n", -err);
			set_bit(SK_DATA, &svsk->sk_flags);
		}
		svc_sock_received(svsk);
		return -EAGAIN;
	}
	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	if (skb->tstamp.tv64 == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	svsk->sk_sk->sk_stamp = skb->tstamp;
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len  = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (cmh->cmsg_level != IPPROTO_IP ||
	    cmh->cmsg_type != IP_PKTINFO) {
		if (net_ratelimit())
			printk("rpcsvc: received unknown control message:"
			       "%d/%d\n",
			       cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram(svsk->sk_sk, skb);
		return 0;
	}
	svc_udp_get_dest_address(rqstp, cmh);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_xprt_ctxt = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}

static void svc_udp_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
static int svc_udp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_server;
	unsigned long required;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
	if (required*2 > sock_wspace(svsk->sk_sk))
		return 0;
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}
static struct svc_xprt_ops svc_udp_ops = {
	.xpo_recvfrom = svc_udp_recvfrom,
	.xpo_sendto = svc_udp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_udp_prep_reply_hdr,
	.xpo_has_wspace = svc_udp_has_wspace,
};

static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_ops = &svc_udp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
};
static void
svc_udp_init(struct svc_sock *svsk)
{
	int one = 1;
	mm_segment_t oldfs;

	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt);
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* make sure we get destination address info */
	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
				       (char __user *)&one, sizeof(one));
	set_fs(oldfs);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
static inline int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, sin, slen);
	newsvsk->sk_remotelen = slen;
	err = kernel_getsockname(newsock, sin, &slen);
	if (unlikely(err < 0)) {
		dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
		slen = offsetof(struct sockaddr, sa_data);
	}
	memcpy(&newsvsk->sk_local, sin, slen);

	svc_sock_received(newsvsk);

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. The NFS client does 1 reconnect in 15
	 * seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
				       "sockets, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
				printk(KERN_NOTICE
				       "%s: last TCP connect from %s\n",
				       serv->sv_name, __svc_print_addr(sin,
							buf, sizeof(buf)));
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}
	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_xprt_ctxt   = NULL;
	rqstp->rq_prot	      = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

 err_delete:
	svc_delete_socket(svsk);
	return -EAGAIN;

 error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}
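
/*
 * A worked example of the RPC record marker handled above: a 512-byte
 * record is preceded on the wire by the 4 network-order bytes
 * 0x80 0x00 0x02 0x00, so ntohl() yields 0x80000200.  The top bit means
 * "last fragment"; masking with 0x7fffffff leaves the record length,
 * 0x200 = 512.
 */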
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len ) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
		svc_sock_enqueue(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
/*
 * Setup response header. TCP has a 4B record length field.
 */
static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];

	/* tcp needs a space for the record length... */
	svc_putnl(resv, 0);
}

static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_server;
	int required;
	int wspace;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg;
	wspace = sk_stream_wspace(svsk->sk_sk);

	if (wspace < sk_stream_min_wspace(svsk->sk_sk))
		return 0;
	if (required * 2 > wspace)
		return 0;

	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}
static struct svc_xprt_ops svc_tcp_ops = {
	.xpo_recvfrom = svc_tcp_recvfrom,
	.xpo_sendto = svc_tcp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
	.xpo_has_wspace = svc_tcp_has_wspace,
};

static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_ops = &svc_tcp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}
static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt);

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;        /* disable Nagle's algorithm */

		/* initialise setting must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_max_mesg,
				    3 * svsk->sk_server->sv_max_mesg);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);


	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
				__FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
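
/*
 * The mark-and-sweep above works in two passes per timer tick: the
 * first walk of sv_tempsocks sets SK_OLD ("mark") and skips any socket
 * that did not already carry it, so a socket is only swept after
 * sitting idle for a full svc_conn_age_period.  svc_recv() clears
 * SK_OLD on every successful receive, which is what keeps an active
 * socket alive.
 */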
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
						struct socket *sock,
						int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		is_temporary = flags & SVC_SOCK_TEMPORARY;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	atomic_set(&svsk->sk_inuse, 1);
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (is_temporary) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
					(unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
					jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
				svsk, svsk->sk_sk);

	return svsk;
}
int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
		if (svsk) {
			svc_sock_received(svsk);
			err = 0;
		}
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
/*
 * Create socket for RPC service.
 */
static int svc_create_socket(struct svc_serv *serv, int protocol,
				struct sockaddr *sin, int len, int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: svc_create_socket(%s, %d, %s)\n",
			serv->sv_program->pg_name, protocol,
			__svc_print_addr(sin, buf, sizeof(buf)));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	error = sock_create_kern(sin->sa_family, type, protocol, &sock);
	if (error < 0)
		return error;

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_sock_received(svsk);
		return ntohs(inet_sk(svsk->sk_sk)->sport);
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}
/*
 * Detach the svc_sock from the socket so that no
 * more callbacks occur.
 */
static void svc_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sock *sk = svsk->sk_sk;

	dprintk("svc: svc_sock_detach(%p)\n", svsk);

	/* put back the old socket callbacks */
	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;
}
/*
 * Free the svc_sock's socket resources and the svc_sock itself.
 */
static void svc_sock_free(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	dprintk("svc: svc_sock_free(%p)\n", svsk);

	if (svsk->sk_info_authunix != NULL)
		svcauth_unix_info_release(svsk->sk_info_authunix);
	if (svsk->sk_sock->file)
		sockfd_put(svsk->sk_sock);
	else
		sock_release(svsk->sk_sock);
	kfree(svsk);
}
/*
 * Remove a dead socket
 */
static void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;

	svsk->sk_xprt.xpt_ops->xpo_detach(&svsk->sk_xprt);

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
		BUG_ON(atomic_read(&svsk->sk_inuse)<2);
		atomic_dec(&svsk->sk_inuse);
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;
	}

	spin_unlock_bh(&serv->sv_lock);
}
static void svc_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
		/* someone else will have to effect the close */
		return;

	atomic_inc(&svsk->sk_inuse);
	svc_delete_socket(svsk);
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_put(svsk);
}
void svc_force_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Waiting to be processed, but no threads left,
		 * so just remove it from the waiting list
		 */
		list_del_init(&svsk->sk_ready);
		clear_bit(SK_BUSY, &svsk->sk_flags);
	}
	svc_close_socket(svsk);
}
/**
 * svc_makesock - Make a socket for nfsd and lockd
 * @serv: RPC server structure
 * @protocol: transport protocol to use
 * @port: port to use
 * @flags: requested socket characteristics
 *
 */
int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port,
			int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= INADDR_ANY,
		.sin_port		= htons(port),
	};

	dprintk("svc: creating socket proto = %d\n", protocol);
	return svc_create_socket(serv, protocol, (struct sockaddr *) &sin,
							sizeof(sin), flags);
}
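
/*
 * A sketch of how a single-threaded service might call this during
 * startup (the port value is illustrative):
 *
 *	err = svc_makesock(serv, IPPROTO_UDP, 2049, SVC_SOCK_DEFAULTS);
 *	// err is the bound port in host byte order, or a negative errno
 */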
/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock(&svsk->sk_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock(&svsk->sk_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	atomic_inc(&rqstp->rq_sock->sk_inuse);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return dr->argslen<<2;
}
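
/*
 * Lifecycle of a deferred request, in terms of the functions above and
 * below (a summary, not new behaviour): a cache miss during processing
 * calls rqstp->rq_chandle.defer, i.e. svc_defer(), which snapshots the
 * request and pins the svc_sock; when the cache entry is filled,
 * svc_revisit() moves the snapshot onto svsk->sk_deferred and sets
 * SK_DEFERRED; the next svc_{udp,tcp}_recvfrom() on that socket then
 * picks it up via svc_deferred_dequeue() and replays it through
 * svc_deferred_recv() instead of reading from the network.
 */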
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock(&svsk->sk_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock(&svsk->sk_lock);
	return dr;
}