/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_xprt_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 * svc_pool->sp_lock protects most of the fields of that pool.
 * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 * when both need to be taken (rare), svc_serv->sv_lock is first.
 * BKL protects svc_serv->sv_nrthread.
 * svc_sock->sk_lock protects the svc_sock->sk_deferred list
 * and the ->sk_info_authunix cache.
 * svc_sock->sk_xprt.xpt_flags.XPT_BUSY prevents a svc_sock being
 * enqueued multiply.
 *
 * Some flags can be set to certain values at any time
 * providing that certain rules are followed:
 *
 * XPT_CONN, XPT_DATA can be set or cleared at any time.
 *		after a set, svc_xprt_enqueue must be called.
 *		after a clear, the socket must be read/accepted.
 *		If this succeeds, it must be set again.
 * XPT_CLOSE can be set at any time. It is never cleared.
 * xpt_ref contains a bias of '1' until XPT_DEAD is set.
 *		so when xpt_ref hits zero, we know the transport is dead
 *		and no-one is using it.
 * XPT_DEAD can only be set while XPT_BUSY is held, which ensures
 *		no other thread will be using the socket or will try to
 *		set XPT_DEAD.
 */
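/*
 * Illustrative sketch, not part of the original file: under the flag
 * rules above, any path that sets XPT_DATA (or XPT_CONN) must follow
 * the set with a call to svc_xprt_enqueue(), roughly like this:
 */
static inline void svc_sketch_mark_data(struct svc_xprt *xprt)
{
	set_bit(XPT_DATA, &xprt->xpt_flags);	/* flag the new data... */
	svc_xprt_enqueue(xprt);			/* ...then always enqueue */
}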
#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_xprt(struct svc_xprt *xprt);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_close_xprt(struct svc_xprt *xprt);
static void		svc_sock_detach(struct svc_xprt *);
static void		svc_sock_free(struct svc_xprt *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
					  struct sockaddr *, int, int);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
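/* The period is in seconds; svc_age_temp_sockets() below re-arms its
 * timer every svc_conn_age_period * HZ jiffies.
 */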
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sock_owned_by_user(sk));
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif

static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_INET:
		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
			ntohs(((struct sockaddr_in *) addr)->sin_port));
		break;

	case AF_INET6:
		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
			ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
		break;

	default:
		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
		break;
	}
	return buf;
}

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static void svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_xprt_ctxt = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(xprt->xpt_flags &
	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
		return;
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
		       "svc_xprt_enqueue: "
		       "threads and transports both waiting??\n");

	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: transport %p is dead, not enqueued\n", xprt);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update XPT_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out_unlock;
	}
	BUG_ON(xprt->xpt_pool != NULL);
	xprt->xpt_pool = pool;

	/* Handle pending connection */
	if (test_bit(XPT_CONN, &xprt->xpt_flags))
		goto process;

	/* Handle close in-progress */
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
		goto process;

	/* Check if we have space to reply to a request */
	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: no write space, transport %p not enqueued\n",
			xprt);
		xprt->xpt_pool = NULL;
		clear_bit(XPT_BUSY, &xprt->xpt_flags);
		goto out_unlock;
	}

 process:
	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: transport %p served by daemon %p\n",
			xprt, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_xprt)
			printk(KERN_ERR
				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
				rqstp, rqstp->rq_xprt);
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
		BUG_ON(xprt->xpt_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: transport %p put into queue\n", xprt);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		BUG_ON(xprt->xpt_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_xprt.xpt_ready);
	list_del_init(&svsk->sk_xprt.xpt_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_xprt.xpt_ref.refcount));

	return svsk;
}
/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
void svc_xprt_received(struct svc_xprt *xprt)
{
	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
	xprt->xpt_pool = NULL;
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_xprt_put(&svsk->sk_xprt);
}
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))
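/* The union above sizes a single control-message buffer large enough
 * for whichever pktinfo structure (IPv4 or IPv6) a given socket needs.
 */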
static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
				       &rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
	return;
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	char		buf[RPC_MAX_ADDRBUFLEN];

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};

		svc_set_cmsg_data(rqstp, cmh);

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
			      xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					 ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					 xdr->tail[0].iov_len, 0);
		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			      svsk->sk_sk->sk_family);
	}
	return len;
}

int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_xprt(&closesk->sk_xprt);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	struct sockaddr *sin;
	int len;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
			     msg.msg_flags);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 */
	memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
	rqstp->rq_addrlen = svsk->sk_remotelen;

	/* Destination address in request is needed for binding the
	 * source address in RPC callbacks later.
	 */
	sin = (struct sockaddr *)&svsk->sk_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		svsk, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count,
			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		svc_xprt_enqueue(&svsk->sk_xprt);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk,
			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
		svc_xprt_enqueue(&svsk->sk_xprt);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			sk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
					    struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
		struct in_pktinfo *pki = CMSG_DATA(cmh);
		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
		break;
		}
	case AF_INET6: {
		struct in6_pktinfo *pki = CMSG_DATA(cmh);
		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
		break;
		}
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct sk_buff	*skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		err, len;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};

	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg);
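	    /* Illustrative arithmetic: with 8 threads and a 32KB
	     * sv_max_mesg, this requests (8+3) * 32KB = 352KB each way
	     * (svc_sock_setbufsize then doubles the value it is given).
	     */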
	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_xprt_received(&svsk->sk_xprt);
		return svc_deferred_recv(rqstp);
	}

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	skb = NULL;
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err >= 0)
		skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);

	if (skb == NULL) {
		if (err != -EAGAIN) {
			/* possibly an icmp error */
			dprintk("svc: recvfrom returned error %d\n", -err);
			set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		}
		svc_xprt_received(&svsk->sk_xprt);
		return -EAGAIN;
	}
	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	if (skb->tstamp.tv64 == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	svsk->sk_sk->sk_stamp = skb->tstamp;
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_xprt_received(&svsk->sk_xprt);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (cmh->cmsg_level != IPPROTO_IP ||
	    cmh->cmsg_type != IP_PKTINFO) {
		if (net_ratelimit())
			printk("rpcsvc: received unknown control message:"
			       "%d/%d\n",
			       cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram(svsk->sk_sk, skb);
		return 0;
	}
	svc_udp_get_dest_address(rqstp, cmh);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_xprt_ctxt = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}
static void svc_udp_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

static int svc_udp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = xprt->xpt_server;
	unsigned long required;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
	if (required*2 > sock_wspace(svsk->sk_sk))
		return 0;
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}

static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
{
	BUG();
	return NULL;
}

static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_UDP, sa, salen, flags);
}
static struct svc_xprt_ops svc_udp_ops = {
	.xpo_create = svc_udp_create,
	.xpo_recvfrom = svc_udp_recvfrom,
	.xpo_sendto = svc_udp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_udp_prep_reply_hdr,
	.xpo_has_wspace = svc_udp_has_wspace,
	.xpo_accept = svc_udp_accept,
};

static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_udp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
};
static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	int one = 1;
	mm_segment_t oldfs;

	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv);
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);

	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* might have come in before data_ready set up */
	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* make sure we get destination address info */
	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
				       (char __user *)&one, sizeof(one));
	set_fs(oldfs);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of child sockets become ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
			svc_xprt_enqueue(&svsk->sk_xprt);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
		svc_xprt_enqueue(&svsk->sk_xprt);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		svc_xprt_enqueue(&svsk->sk_xprt);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
static inline int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}
/*
 * Accept a TCP connection
 */
static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return NULL;

	clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return NULL;
	}

	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, sin, slen);
	newsvsk->sk_remotelen = slen;
	err = kernel_getsockname(newsock, sin, &slen);
	if (unlikely(err < 0)) {
		dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
		slen = offsetof(struct sockaddr, sa_data);
	}
	memcpy(&newsvsk->sk_local, sin, slen);

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return &newsvsk->sk_xprt;

failed:
	sock_release(newsock);
	return NULL;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_xprt_received(&svsk->sk_xprt);
		return svc_deferred_recv(rqstp);
	}

	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);
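		/* Illustrative arithmetic: with 8 threads and a 32KB
		 * sv_max_mesg, sndbuf is sized for (8+3) * 32KB = 352KB,
		 * while rcvbuf only needs 3 * 32KB = 96KB because complete
		 * requests are drained promptly.
		 */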
	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_xprt_received(&svsk->sk_xprt);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 * and non-terminal fragments will not have the top
			 * bit set in the fragment length header.
			 * But apparently no known nfs clients send fragmented
			 * records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_xprt_received(&svsk->sk_xprt);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_xprt_ctxt = NULL;
	rqstp->rq_prot = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_xprt_received(&svsk->sk_xprt);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

 err_delete:
	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	return -EAGAIN;

 error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_xprt_received(&svsk->sk_xprt);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_xprt.xpt_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}
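/*
 * Illustrative sketch, not part of the original file: the layout of the
 * 4-byte record marker read above (RFC 1831 record marking). The top bit
 * flags the final fragment of a record; the low 31 bits carry the
 * fragment length.
 */
static inline u32 svc_sketch_decode_marker(__be32 marker, int *last_frag)
{
	u32 len = ntohl(marker);

	*last_frag = (len & 0x80000000) != 0;
	return len & 0x7fffffff;
}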
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);
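	/* The marker just written is the final-fragment bit (0x80000000)
	 * OR'd with the reply length minus the four marker bytes that
	 * occupy the first word of head[0].
	 */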
	if (test_bit(XPT_DEAD, &rqstp->rq_sock->sk_xprt.xpt_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_xprt.xpt_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(XPT_CLOSE, &rqstp->rq_sock->sk_xprt.xpt_flags);
		svc_xprt_enqueue(rqstp->rq_xprt);
		sent = -EAGAIN;
	}
	return sent;
}
/*
 * Setup response header. TCP has a 4B record length field.
 */
static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];

	/* tcp needs a space for the record length... */
	svc_putnl(resv, 0);
}
static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	int required;
	int wspace;

	/*
	 * Set the SOCK_NOSPACE flag before checking the available
	 * sock space.
	 */
	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
	wspace = sk_stream_wspace(svsk->sk_sk);

	if (wspace < sk_stream_min_wspace(svsk->sk_sk))
		return 0;
	if (required * 2 > wspace)
		return 0;

	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	return 1;
}
static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
				       struct sockaddr *sa, int salen,
				       int flags)
{
	return svc_create_socket(serv, IPPROTO_TCP, sa, salen, flags);
}

static struct svc_xprt_ops svc_tcp_ops = {
	.xpo_create = svc_tcp_create,
	.xpo_recvfrom = svc_tcp_recvfrom,
	.xpo_sendto = svc_tcp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
	.xpo_has_wspace = svc_tcp_has_wspace,
	.xpo_accept = svc_tcp_accept,
};

static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_tcp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}
static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv);

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;        /* disable Nagle's algorithm */

		/* initialise setting must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);

		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_xprt.xpt_list);
		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_xprt.xpt_list);
		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Make sure that we don't have too many active connections. If we
 * have, something must be dropped.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. An NFS client does one reconnect every 15 seconds; an
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
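		/* Illustrative arithmetic: with 8 server threads the cap
		 * works out to (8+3) * 20 = 220 temporary sockets.
		 */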
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
				       "sockets, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_xprt.xpt_list);
			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
			svc_xprt_get(&svsk->sk_xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_xprt_enqueue(&svsk->sk_xprt);
			svc_xprt_put(&svsk->sk_xprt);
		}
	}
}
/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
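	/* Illustrative arithmetic: with 4KB pages and a 32KB sv_max_mesg,
	 * this works out to (32KB + 4KB) / 4KB = 9 pages.
	 */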
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		svc_xprt_get(&svsk->sk_xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_xprt.xpt_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	len = 0;
	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(&svsk->sk_xprt);
	} else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
		struct svc_xprt *newxpt;
		newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
		if (newxpt) {
			/*
			 * We know this module_get will succeed because the
			 * listener holds a reference too
			 */
			__module_get(newxpt->xpt_class->xcl_owner);
			svc_check_conn_limits(svsk->sk_xprt.xpt_server);
			svc_xprt_received(newxpt);
		}
		svc_xprt_received(&svsk->sk_xprt);
	} else {
		dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
			rqstp, pool->sp_id, svsk,
			atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
		len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
	}

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	clear_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int		len;
	struct xdr_buf	*xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		return -EFAULT;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_xprt.xpt_list);

		if (!test_and_set_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags))
			continue;
		if (atomic_read(&svsk->sk_xprt.xpt_ref.refcount) > 1
		    || test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
			continue;
		svc_xprt_get(&svsk->sk_xprt);
		list_move(le, &to_be_aged);
		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
		set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_xprt.xpt_list node is safe 'cos we're XPT_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_xprt.xpt_list);

		dprintk("queuing svsk %p for closing\n", svsk);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(&svsk->sk_xprt);
		svc_xprt_put(&svsk->sk_xprt);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
					 struct socket *sock,
					 int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		is_temporary = flags & SVC_SOCK_TEMPORARY;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	spin_lock_init(&svsk->sk_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk, serv);
	else
		svc_tcp_init(svsk, serv);

	spin_lock_bh(&serv->sv_lock);
	if (is_temporary) {
		set_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
		list_add(&svsk->sk_xprt.xpt_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
				    (unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
				  jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
		list_add(&svsk->sk_xprt.xpt_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
		svsk, svsk->sk_sk);

	return svsk;
}
int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
		if (svsk) {
			svc_xprt_received(&svsk->sk_xprt);
			err = 0;
		}
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
/*
 * Create socket for RPC service.
 */
static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
					  int protocol,
					  struct sockaddr *sin, int len,
					  int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: svc_create_socket(%s, %d, %s)\n",
			serv->sv_program->pg_name, protocol,
			__svc_print_addr(sin, buf, sizeof(buf)));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return ERR_PTR(-EINVAL);
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	error = sock_create_kern(sin->sa_family, type, protocol, &sock);
	if (error < 0)
		return ERR_PTR(error);

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_xprt_received(&svsk->sk_xprt);
		return (struct svc_xprt *)svsk;
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return ERR_PTR(error);
}
/*
 * Detach the svc_sock from the socket so that no
 * more callbacks occur.
 */
static void svc_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sock *sk = svsk->sk_sk;

	dprintk("svc: svc_sock_detach(%p)\n", svsk);

	/* put back the old socket callbacks */
	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;
}
/*
 * Free the svc_sock's socket resources and the svc_sock itself.
 */
static void svc_sock_free(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	dprintk("svc: svc_sock_free(%p)\n", svsk);

	if (svsk->sk_info_authunix != NULL)
		svcauth_unix_info_release(svsk->sk_info_authunix);
	if (svsk->sk_sock->file)
		sockfd_put(svsk->sk_sock);
	else
		sock_release(svsk->sk_sock);
	kfree(svsk);
}
/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
		list_del_init(&xprt->xpt_list);
	/*
	 * We used to delete the transport from whichever list
	 * its sk_xprt.xpt_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
		BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
		if (test_bit(XPT_TEMP, &xprt->xpt_flags))
			serv->sv_tmpcnt--;
		svc_xprt_put(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);
}
static void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;

	svc_xprt_get(xprt);
	svc_delete_xprt(xprt);
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_put(xprt);
}
void svc_close_all(struct list_head *xprt_list)
{
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;

	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
			/* Waiting to be processed, but no threads left,
			 * So just remove it from the waiting list
			 */
			list_del_init(&xprt->xpt_ready);
			clear_bit(XPT_BUSY, &xprt->xpt_flags);
		}
		svc_close_xprt(xprt);
	}
}
/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_xprt_put(&dr->svsk->sk_xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock(&svsk->sk_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock(&svsk->sk_lock);
	set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
	svc_xprt_enqueue(&svsk->sk_xprt);
	svc_xprt_put(&svsk->sk_xprt);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
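		/* argslen is kept in 32-bit XDR words, hence the >>2 here
		 * and the <<2 wherever it is converted back to bytes.
		 */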
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip,
		       dr->argslen<<2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags))
		return NULL;
	spin_lock(&svsk->sk_lock);
	clear_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
	}
	spin_unlock(&svsk->sk_lock);
	return dr;
}