/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *	Alan Cox	:	verify_area() calls
 *	Alan Cox	:	stopped close while in use off icmp
 *				messages. Not a fix but a botch that
 *				for udp at least is 'valid'.
 *	Alan Cox	:	Fixed icmp handling properly
 *	Alan Cox	:	Correct error for oversized datagrams
 *	Alan Cox	:	Tidied select() semantics.
 *	Alan Cox	:	udp_err() fixed properly, also now
 *				select and read wake correctly on errors
 *	Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *	Alan Cox	:	UDP can count its memory
 *	Alan Cox	:	send to an unknown connection causes
 *				an ECONNREFUSED off the icmp, but
 *				does NOT close.
 *	Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *	Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *				bug no longer crashes it.
 *	Fred Van Kempen	:	Net2e support for sk->broadcast.
 *	Alan Cox	:	Uses skb_free_datagram
 *	Alan Cox	:	Added get/set sockopt support.
 *	Alan Cox	:	Broadcasting without option set returns EACCES.
 *	Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *	Alan Cox	:	Use ip_tos and ip_ttl
 *	Alan Cox	:	SNMP Mibs
 *	Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *	Matt Dillon	:	UDP length checks.
 *	Alan Cox	:	Smarter af_inet used properly.
 *	Alan Cox	:	Use new kernel side addressing.
 *	Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen:	New udp_send and stuff
 *	Alan Cox	:	Cache last socket
 *	Alan Cox	:	Route cache
 *	Jon Peatfield	:	Minor efficiency fix to sendto().
 *	Mike Shaver	:	RFC1122 checks.
 *	Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg:	Transparent proxying support.
 *	Mike McLagan	:	Routing by source
 *	David S. Miller	:	New socket lookup architecture.
 *				Last socket cache retained as it
 *				does have a high hit rate.
 *	Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *	Andi Kleen	:	Some cleanups, cache destination entry
 *				for connect.
 *	Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *	Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *				return ENOTCONN for unconnected sockets (POSIX)
 *	Janos Farkas	:	don't deliver multi/broadcasts to a different
 *				bound-to-device socket
 *	Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *				datagrams.
 *	Hirokazu Takahashi:	sendfile() on UDP works now.
 *	Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support the IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *	James Chapman	:	Add L2TP encapsulation type.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include "udp_impl.h"

struct udp_table udp_table;
EXPORT_SYMBOL(udp_table);

int sysctl_udp_mem[3] __read_mostly;
int sysctl_udp_rmem_min __read_mostly;
int sysctl_udp_wmem_min __read_mostly;

EXPORT_SYMBOL(sysctl_udp_mem);
EXPORT_SYMBOL(sysctl_udp_rmem_min);
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
static int udp_lib_lport_inuse(struct net *net, __u16 num,
                               const struct udp_hslot *hslot,
                               struct sock *sk,
                               int (*saddr_comp)(const struct sock *sk1,
                                                 const struct sock *sk2))
{
        struct sock *sk2;
        struct hlist_node *node;

        sk_for_each(sk2, node, &hslot->head)
                if (net_eq(sock_net(sk2), net) &&
                    sk2 != sk &&
                    sk2->sk_hash == num &&
                    (!sk2->sk_reuse || !sk->sk_reuse) &&
                    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
                        || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
                    (*saddr_comp)(sk, sk2))
                        return 1;
        return 0;
}
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
                     int (*saddr_comp)(const struct sock *sk1,
                                       const struct sock *sk2))
{
        struct udp_hslot *hslot;
        struct udp_table *udptable = sk->sk_prot->h.udp_table;
        int error = 1;
        struct net *net = sock_net(sk);

        if (!snum) {
                int low, high, remaining;
                unsigned rand;
                unsigned short first;

                inet_get_local_port_range(&low, &high);
                remaining = (high - low) + 1;

                rand = net_random();
                snum = first = rand % remaining + low;
                for (;;) {
                        hslot = &udptable->hash[udp_hashfn(net, snum)];
                        spin_lock_bh(&hslot->lock);
                        if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
                                break;
                        spin_unlock_bh(&hslot->lock);
                        do {
                                snum = snum + rand;
                        } while (snum < low || snum > high);
                        if (snum == first)
                                goto fail;
                }
        } else {
                hslot = &udptable->hash[udp_hashfn(net, snum)];
                spin_lock_bh(&hslot->lock);
                if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
                        goto fail_unlock;
        }
        inet_sk(sk)->num = snum;
        sk->sk_hash = snum;
        if (sk_unhashed(sk)) {
                sk_add_node_rcu(sk, &hslot->head);
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        }
        error = 0;
fail_unlock:
        spin_unlock_bh(&hslot->lock);
fail:
        return error;
}
static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
        struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

        return (!ipv6_only_sock(sk2) &&
                (!inet1->rcv_saddr || !inet2->rcv_saddr ||
                 inet1->rcv_saddr == inet2->rcv_saddr));
}

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
        return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal);
}
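
/*
 * Illustrative userspace sketch (not part of this file's build): binding
 * with port 0 exercises the ephemeral-port selection in udp_lib_get_port()
 * above, and getsockname() then reveals the port the kernel picked.  The
 * function name here is hypothetical.
 *
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int bound_port(void)
 *	{
 *		struct sockaddr_in a;
 *		socklen_t alen = sizeof(a);
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&a, 0, sizeof(a));
 *		a.sin_family = AF_INET;
 *		a.sin_port = 0;			// let the kernel choose
 *		a.sin_addr.s_addr = htonl(INADDR_ANY);
 *		if (fd < 0 || bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *			return -1;
 *		if (getsockname(fd, (struct sockaddr *)&a, &alen) < 0)
 *			return -1;
 *		return ntohs(a.sin_port);	// the chosen local port
 *	}
 */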
static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
                                unsigned short hnum,
                                __be16 sport, __be32 daddr, __be16 dport, int dif)
{
        int score = -1;

        if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
            !ipv6_only_sock(sk)) {
                struct inet_sock *inet = inet_sk(sk);

                score = (sk->sk_family == PF_INET ? 1 : 0);
                if (inet->rcv_saddr) {
                        if (inet->rcv_saddr != daddr)
                                return -1;
                        score += 2;
                }
                if (inet->daddr) {
                        if (inet->daddr != saddr)
                                return -1;
                        score += 2;
                }
                if (inet->dport) {
                        if (inet->dport != sport)
                                return -1;
                        score += 2;
                }
                if (sk->sk_bound_dev_if) {
                        if (sk->sk_bound_dev_if != dif)
                                return -1;
                        score += 2;
                }
        }
        return score;
}
/* UDP sockets are nearly always wildcarded out the wazoo; it makes no
 * sense to try harder than this. -DaveM
 */
static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
                __be16 sport, __be32 daddr, __be16 dport,
                int dif, struct udp_table *udptable)
{
        struct sock *sk, *result;
        struct hlist_node *node;
        unsigned short hnum = ntohs(dport);
        unsigned int hash = udp_hashfn(net, hnum);
        struct udp_hslot *hslot = &udptable->hash[hash];
        int score, badness;

        rcu_read_lock();
begin:
        result = NULL;
        badness = -1;
        sk_for_each_rcu(sk, node, &hslot->head) {
                /*
                 * lockless reader, and SLAB_DESTROY_BY_RCU items:
                 * We must check this item was not moved to another chain
                 */
                if (udp_hashfn(net, sk->sk_hash) != hash)
                        goto begin;
                score = compute_score(sk, net, saddr, hnum, sport,
                                      daddr, dport, dif);
                if (score > badness) {
                        result = sk;
                        badness = score;
                }
        }
        if (result) {
                if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
                        result = NULL;
                else if (unlikely(compute_score(result, net, saddr, hnum, sport,
                                  daddr, dport, dif) < badness)) {
                        sock_put(result);
                        goto begin;
                }
        }
        rcu_read_unlock();
        return result;
}

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
                                                 __be16 sport, __be16 dport,
                                                 struct udp_table *udptable)
{
        struct sock *sk;
        const struct iphdr *iph = ip_hdr(skb);

        if (unlikely(sk = skb_steal_sock(skb)))
                return sk;
        else
                return __udp4_lib_lookup(dev_net(skb->dst->dev), iph->saddr, sport,
                                         iph->daddr, dport, inet_iif(skb),
                                         udptable);
}

struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
                             __be32 daddr, __be16 dport, int dif)
{
        return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
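
/*
 * Minimal in-kernel usage sketch (an assumption for illustration, not code
 * from this file): a caller of udp4_lib_lookup() receives a socket with
 * sk_refcnt elevated on success, so it must drop that reference with
 * sock_put() when done.
 *
 *	struct sock *sk;
 *
 *	sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, dif);
 *	if (sk) {
 *		... inspect or use the socket ...
 *		sock_put(sk);	// drop the reference taken by the lookup
 *	}
 */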
static inline struct sock *udp_v4_mcast_next(struct sock *sk,
                                             __be16 loc_port, __be32 loc_addr,
                                             __be16 rmt_port, __be32 rmt_addr,
                                             int dif)
{
        struct hlist_node *node;
        struct sock *s = sk;
        unsigned short hnum = ntohs(loc_port);

        sk_for_each_from(s, node) {
                struct inet_sock *inet = inet_sk(s);

                if (s->sk_hash != hnum ||
                    (inet->daddr && inet->daddr != rmt_addr) ||
                    (inet->dport != rmt_port && inet->dport) ||
                    (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
                    ipv6_only_sock(s) ||
                    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
                        continue;
                if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
                        continue;
                goto found;
        }
        s = NULL;
found:
        return s;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet.  We move
 * on past this.  Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 * A userspace sketch of consuming these errors via IP_RECVERR
 * follows udp_err() below.
 */
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
        struct inet_sock *inet;
        struct iphdr *iph = (struct iphdr *)skb->data;
        struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct sock *sk;
        int harderr;
        int err;
        struct net *net = dev_net(skb->dev);

        sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
                        iph->saddr, uh->source, skb->dev->ifindex, udptable);
        if (sk == NULL) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;	/* No socket for error */
        }

        err = 0;
        harderr = 0;
        inet = inet_sk(sk);

        switch (type) {
        default:
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        case ICMP_SOURCE_QUENCH:
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                harderr = 1;
                break;
        case ICMP_DEST_UNREACH:
                if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
                        if (inet->pmtudisc != IP_PMTUDISC_DONT) {
                                err = EMSGSIZE;
                                harderr = 1;
                                break;
                        }
                        goto out;
                }
                err = EHOSTUNREACH;
                if (code <= NR_ICMP_UNREACH) {
                        harderr = icmp_err_convert[code].fatal;
                        err = icmp_err_convert[code].errno;
                }
                break;
        }

        /*
         * RFC1122: OK.  Passes ICMP errors back to application, as per
         * 4.1.3.3.
         */
        if (!inet->recverr) {
                if (!harderr || sk->sk_state != TCP_ESTABLISHED)
                        goto out;
        } else {
                ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh + 1));
        }
        sk->sk_err = err;
        sk->sk_error_report(sk);
out:
        sock_put(sk);
}

void udp_err(struct sk_buff *skb, u32 info)
{
        __udp4_lib_err(skb, info, &udp_table);
}
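
/*
 * Userspace sketch (illustrative, not part of the kernel build): with
 * IP_RECVERR enabled, the errors queued by ip_icmp_error() above can be
 * drained from the socket's error queue via MSG_ERRQUEUE.  All identifiers
 * below that are not standard API are hypothetical.
 *
 *	#include <errno.h>
 *	#include <netinet/in.h>
 *	#include <linux/errqueue.h>
 *	#include <sys/socket.h>
 *
 *	void drain_errqueue(int fd)
 *	{
 *		int on = 1;
 *		char cbuf[512], data[512];
 *		struct iovec iov = { data, sizeof(data) };
 *		struct msghdr msg = { 0 };
 *		struct cmsghdr *cm;
 *
 *		setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *			return;
 *		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *			if (cm->cmsg_level == IPPROTO_IP &&
 *			    cm->cmsg_type == IP_RECVERR) {
 *				struct sock_extended_err *ee =
 *					(struct sock_extended_err *)CMSG_DATA(cm);
 *				if (ee->ee_origin == SO_EE_ORIGIN_ICMP)
 *					errno = ee->ee_errno;
 *			}
 *	}
 */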
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
        struct udp_sock *up = udp_sk(sk);

        if (up->pending) {
                up->len = 0;
                up->pending = 0;
                ip_flush_pending_frames(sk);
        }
}
EXPORT_SYMBOL(udp_flush_pending_frames);
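
/*
 * Userspace sketch (illustrative): UDP_CORK accumulates appended data as
 * pending frames until the cork is removed, at which point the pending
 * frames are pushed out as a single datagram.  MSG_MORE on send() has the
 * same effect per call.  The function name is hypothetical.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/udp.h>
 *	#include <sys/socket.h>
 *
 *	void send_two_parts_as_one(int fd)	// fd: a connected UDP socket
 *	{
 *		int on = 1, off = 0;
 *
 *		setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *		send(fd, "hello ", 6, 0);	// queued, not yet transmitted
 *		send(fd, "world", 5, 0);	// still queued
 *		setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *		// one 11-byte datagram leaves the socket here
 *	}
 */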
/*
 * udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
 * @sk:		socket we are sending on
 * @skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
                                 __be32 src, __be32 dst, int len)
{
        unsigned int offset;
        struct udphdr *uh = udp_hdr(skb);
        __wsum csum = 0;

        if (skb_queue_len(&sk->sk_write_queue) == 1) {
                /*
                 * Only one fragment on the socket.
                 */
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
                uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
        } else {
                /*
                 * HW-checksum won't work as there are two or more
                 * fragments on the socket so that all csums of sk_buffs
                 * should be together
                 */
                offset = skb_transport_offset(skb);
                skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

                skb->ip_summed = CHECKSUM_NONE;

                skb_queue_walk(&sk->sk_write_queue, skb) {
                        csum = csum_add(csum, skb->csum);
                }

                uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
        }
}
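
/*
 * For reference, a standalone sketch of the ones'-complement fold that
 * csum_tcpudp_magic() performs above (illustrative, not the kernel helper):
 * partial 32-bit sums are folded down to 16 bits and complemented.  A
 * resulting checksum of 0 must be transmitted as 0xFFFF (CSUM_MANGLED_0),
 * because 0 on the wire means "no checksum" for UDP.
 *
 *	static uint16_t csum_fold32(uint32_t sum)
 *	{
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 */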
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk)
{
        struct udp_sock *up = udp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct flowi *fl = &inet->cork.fl;
        struct sk_buff *skb;
        struct udphdr *uh;
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);
        __wsum csum = 0;

        /* Grab the skbuff where UDP header space exists. */
        if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
                goto out;

        /*
         * Create a UDP header
         */
        uh = udp_hdr(skb);
        uh->source = fl->fl_ip_sport;
        uh->dest = fl->fl_ip_dport;
        uh->len = htons(up->len);
        uh->check = 0;

        if (is_udplite)					/*     UDP-Lite      */
                csum = udplite_csum_outgoing(sk, skb);

        else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {	/* UDP csum disabled */

                skb->ip_summed = CHECKSUM_NONE;
                goto send;

        } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

                udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
                goto send;

        } else						/*   `normal' UDP    */
                csum = udp_csum_outgoing(sk, skb);

        /* add protocol-dependent pseudo-header */
        uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
                                      sk->sk_protocol, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;

send:
        err = ip_push_pending_frames(sk);
out:
        up->len = 0;
        up->pending = 0;
        if (!err)
                UDP_INC_STATS_USER(sock_net(sk),
                                   UDP_MIB_OUTDATAGRAMS, is_udplite);
        return err;
}
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct udp_sock *up = udp_sk(sk);
        int ulen = len;
        struct ipcm_cookie ipc;
        struct rtable *rt = NULL;
        int free = 0;
        int connected = 0;
        __be32 daddr, faddr, saddr;
        __be16 dport;
        u8 tos;
        int err, is_udplite = IS_UDPLITE(sk);
        int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

        if (len > 0xFFFF)
                return -EMSGSIZE;

        if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */
                return -EOPNOTSUPP;

        ipc.opt = NULL;

        if (up->pending) {
                /*
                 * There are pending frames.
                 * The socket lock must be held while it's corked.
                 */
                lock_sock(sk);
                if (likely(up->pending)) {
                        if (unlikely(up->pending != AF_INET)) {
                                release_sock(sk);
                                return -EINVAL;
                        }
                        goto do_append_data;
                }
                release_sock(sk);
        }
        ulen += sizeof(struct udphdr);

        /*
         * Get and verify the address.
         */
        if (msg->msg_name) {
                struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
                if (msg->msg_namelen < sizeof(*usin))
                        return -EINVAL;
                if (usin->sin_family != AF_INET) {
                        if (usin->sin_family != AF_UNSPEC)
                                return -EAFNOSUPPORT;
                }
                daddr = usin->sin_addr.s_addr;
                dport = usin->sin_port;
                if (dport == 0)
                        return -EINVAL;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
                daddr = inet->daddr;
                dport = inet->dport;
                /* Open fast path for connected socket.
                   Route will not be used, if at least one option is set.
                 */
                connected = 1;
        }
        ipc.addr = inet->saddr;

        ipc.oif = sk->sk_bound_dev_if;
        if (msg->msg_controllen) {
                err = ip_cmsg_send(sock_net(sk), msg, &ipc);
                if (err)
                        return err;
                if (ipc.opt)
                        free = 1;
                connected = 0;
        }
        if (!ipc.opt)
                ipc.opt = inet->opt;

        saddr = ipc.addr;
        ipc.addr = faddr = daddr;

        if (ipc.opt && ipc.opt->srr) {
                if (!daddr)
                        return -EINVAL;
                faddr = ipc.opt->faddr;
                connected = 0;
        }
        tos = RT_TOS(inet->tos);
        if (sock_flag(sk, SOCK_LOCALROUTE) ||
            (msg->msg_flags & MSG_DONTROUTE) ||
            (ipc.opt && ipc.opt->is_strictroute)) {
                tos |= RTO_ONLINK;
                connected = 0;
        }

        if (ipv4_is_multicast(daddr)) {
                if (!ipc.oif)
                        ipc.oif = inet->mc_index;
                if (!saddr)
                        saddr = inet->mc_addr;
                connected = 0;
        }

        if (connected)
                rt = (struct rtable *)sk_dst_check(sk, 0);

        if (rt == NULL) {
                struct flowi fl = { .oif = ipc.oif,
                                    .nl_u = { .ip4_u =
                                              { .daddr = faddr,
                                                .saddr = saddr,
                                                .tos = tos } },
                                    .proto = sk->sk_protocol,
                                    .uli_u = { .ports =
                                               { .sport = inet->sport,
                                                 .dport = dport } } };
                struct net *net = sock_net(sk);

                security_sk_classify_flow(sk, &fl);
                err = ip_route_output_flow(net, &rt, &fl, sk, 1);
                if (err) {
                        if (err == -ENETUNREACH)
                                IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
                        goto out;
                }

                err = -EACCES;
                if ((rt->rt_flags & RTCF_BROADCAST) &&
                    !sock_flag(sk, SOCK_BROADCAST))
                        goto out;
                if (connected)
                        sk_dst_set(sk, dst_clone(&rt->u.dst));
        }

        if (msg->msg_flags&MSG_CONFIRM)
                goto do_confirm;
back_from_confirm:

        saddr = rt->rt_src;
        if (!ipc.addr)
                daddr = ipc.addr = rt->rt_dst;

        lock_sock(sk);
        if (unlikely(up->pending)) {
                /* The socket is already corked while preparing it. */
                /* ... which is an evident application bug. --ANK */
                release_sock(sk);

                LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
                err = -EINVAL;
                goto out;
        }
        /*
         * Now cork the socket to pend data.
         */
        inet->cork.fl.fl4_dst = daddr;
        inet->cork.fl.fl_ip_dport = dport;
        inet->cork.fl.fl4_src = saddr;
        inet->cork.fl.fl_ip_sport = inet->sport;
        up->pending = AF_INET;

do_append_data:
        up->len += ulen;
        getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
        err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
                             sizeof(struct udphdr), &ipc, rt,
                             corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
        if (err)
                udp_flush_pending_frames(sk);
        else if (!corkreq)
                err = udp_push_pending_frames(sk);
        else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
                up->pending = 0;
        release_sock(sk);

out:
        ip_rt_put(rt);
        if (free)
                kfree(ipc.opt);
        if (!err)
                return len;
        /*
         * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
         * ENOBUFS might not be good (it's not tunable per se), but otherwise
         * we don't have a good statistic (IpOutDiscards but it can be too many
         * things).  We could add another new stat but at least for now that
         * seems like overkill.
         */
        if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                UDP_INC_STATS_USER(sock_net(sk),
                                   UDP_MIB_SNDBUFERRORS, is_udplite);
        }
        return err;

do_confirm:
        dst_confirm(&rt->u.dst);
        if (!(msg->msg_flags&MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
        goto out;
}
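
/*
 * Userspace sketch (illustrative): passing MSG_CONFIRM takes the do_confirm
 * path above, telling neighbour discovery that the destination is still
 * reachable without triggering a fresh probe.
 *
 *	send(fd, buf, len, MSG_CONFIRM);	// fd: a connected UDP socket
 */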
int udp_sendpage(struct sock *sk, struct page *page, int offset,
                 size_t size, int flags)
{
        struct udp_sock *up = udp_sk(sk);
        int ret;

        if (!up->pending) {
                struct msghdr msg = { .msg_flags = flags|MSG_MORE };

                /* Call udp_sendmsg to specify destination address which
                 * sendpage interface can't pass.
                 * This will succeed only when the socket is connected.
                 */
                ret = udp_sendmsg(NULL, sk, &msg, 0);
                if (ret < 0)
                        return ret;
        }

        lock_sock(sk);

        if (unlikely(!up->pending)) {
                release_sock(sk);

                LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
                return -EINVAL;
        }

        ret = ip_append_page(sk, page, offset, size, flags);
        if (ret == -EOPNOTSUPP) {
                release_sock(sk);
                return sock_no_sendpage(sk->sk_socket, page, offset,
                                        size, flags);
        }
        if (ret < 0) {
                udp_flush_pending_frames(sk);
                goto out;
        }

        up->len += size;
        if (!(up->corkflag || (flags&MSG_MORE)))
                ret = udp_push_pending_frames(sk);
        if (!ret)
                ret = size;
out:
        release_sock(sk);
        return ret;
}
/*
 * IOCTL requests applicable to the UDP protocol
 */
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        switch (cmd) {
        case SIOCOUTQ:
        {
                int amount = atomic_read(&sk->sk_wmem_alloc);
                return put_user(amount, (int __user *)arg);
        }

        case SIOCINQ:
        {
                struct sk_buff *skb;
                unsigned long amount;

                amount = 0;
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb != NULL) {
                        /*
                         * We will only return the amount
                         * of this packet since that is all
                         * that will be read.
                         */
                        amount = skb->len - sizeof(struct udphdr);
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
        }

        default:
                return -ENOIOCTLCMD;
        }

        return 0;
}
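
/*
 * Userspace sketch (illustrative): SIOCINQ (an alias of FIONREAD on Linux)
 * returns the payload length of the next queued datagram, not the total
 * queue size, matching the skb_peek() logic above.  The function name is
 * hypothetical.
 *
 *	#include <sys/ioctl.h>
 *
 *	int next_datagram_len(int fd)
 *	{
 *		int n = 0;
 *
 *		if (ioctl(fd, FIONREAD, &n) < 0)
 *			return -1;
 *		return n;	// payload bytes of the next datagram
 *	}
 */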
/*
 * This should be easy, if there is something there we
 * return it, otherwise we block.
 */
int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t len, int noblock, int flags, int *addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
        struct sk_buff *skb;
        unsigned int ulen, copied;
        int peeked;
        int err;
        int is_udplite = IS_UDPLITE(sk);

        /*
         * Check any passed addresses
         */
        if (addr_len)
                *addr_len = sizeof(*sin);

        if (flags & MSG_ERRQUEUE)
                return ip_recv_error(sk, msg, len);

try_again:
        skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
                                  &peeked, &err);
        if (!skb)
                goto out;

        ulen = skb->len - sizeof(struct udphdr);
        copied = len;
        if (copied > ulen)
                copied = ulen;
        else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;

        /*
         * If checksum is needed at all, try to do it while copying the
         * data.  If the data is truncated, or if we only want a partial
         * coverage checksum (UDP-Lite), do it before the copy.
         */
        if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }

        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
                                              msg->msg_iov, copied);
        else {
                err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
                if (err == -EINVAL)
                        goto csum_copy_err;
        }

        if (err)
                goto out_free;

        if (!peeked)
                UDP_INC_STATS_USER(sock_net(sk),
                                   UDP_MIB_INDATAGRAMS, is_udplite);

        sock_recv_timestamp(msg, sk, skb);

        /* Copy the address. */
        if (sin) {
                sin->sin_family = AF_INET;
                sin->sin_port = udp_hdr(skb)->source;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
        }
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);

        err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;

out_free:
        lock_sock(sk);
        skb_free_datagram(sk, skb);
        release_sock(sk);
out:
        return err;

csum_copy_err:
        lock_sock(sk);
        if (!skb_kill_datagram(sk, skb, flags))
                UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        release_sock(sk);

        if (noblock)
                return -EAGAIN;
        goto try_again;
}
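
/*
 * Userspace sketch (illustrative): because udp_recvmsg() sets MSG_TRUNC in
 * msg_flags and, when MSG_TRUNC is passed in, returns the full datagram
 * length, the true size of an oversized datagram can be discovered without
 * consuming it.
 *
 *	ssize_t full_len = recv(fd, buf, sizeof(buf), MSG_PEEK | MSG_TRUNC);
 *	// full_len is the datagram's real length, even if > sizeof(buf);
 *	// the datagram is still queued and can now be read for real.
 */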
int udp_disconnect(struct sock *sk, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        /*
         * 1003.1g - break association.
         */

        sk->sk_state = TCP_CLOSE;
        inet->daddr = 0;
        inet->dport = 0;
        sk->sk_bound_dev_if = 0;
        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
                sk->sk_prot->unhash(sk);
                inet->sport = 0;
        }
        sk_dst_reset(sk);
        return 0;
}
void udp_lib_unhash(struct sock *sk)
{
        struct udp_table *udptable = sk->sk_prot->h.udp_table;
        unsigned int hash = udp_hashfn(sock_net(sk), sk->sk_hash);
        struct udp_hslot *hslot = &udptable->hash[hash];

        spin_lock(&hslot->lock);
        if (sk_del_node_init_rcu(sk)) {
                inet_sk(sk)->num = 0;
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        }
        spin_unlock(&hslot->lock);
}
EXPORT_SYMBOL(udp_lib_unhash);
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int is_udplite = IS_UDPLITE(sk);
        int rc;

        if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
                /* Note that an ENOMEM error is charged twice */
                if (rc == -ENOMEM)
                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
                                         is_udplite);
                goto drop;
        }

        return 0;

drop:
        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        kfree_skb(skb);
        return -1;
}
/*
 * returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        struct udp_sock *up = udp_sk(sk);
        int rc;
        int is_udplite = IS_UDPLITE(sk);

        /*
         * Charge it to the socket, dropping if the queue is full.
         */
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto drop;
        nf_reset(skb);

        if (up->encap_type) {
                /*
                 * This is an encapsulation socket so pass the skb to
                 * the socket's udp_encap_rcv() hook. Otherwise, just
                 * fall through and pass this up the UDP socket.
                 * up->encap_rcv() returns the following value:
                 * =0 if skb was successfully passed to the encap
                 *    handler or was discarded by it.
                 * >0 if skb should be passed on to UDP.
                 * <0 if skb should be resubmitted as proto -N
                 */

                /* if we're overly short, let UDP handle it */
                if (skb->len > sizeof(struct udphdr) &&
                    up->encap_rcv != NULL) {
                        int ret;

                        ret = (*up->encap_rcv)(sk, skb);
                        if (ret <= 0) {
                                UDP_INC_STATS_BH(sock_net(sk),
                                                 UDP_MIB_INDATAGRAMS,
                                                 is_udplite);
                                return -ret;
                        }
                }

                /* FALLTHROUGH -- it's a UDP Packet */
        }

        /*
         * UDP-Lite specific tests, ignored on UDP sockets
         */
        if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

                /*
                 * MIB statistics other than incrementing the error count are
                 * disabled for the following two types of errors: these depend
                 * on the application settings, not on the functioning of the
                 * protocol stack as such.
                 *
                 * RFC 3828 here recommends (sec 3.3): "There should also be a
                 * way ... to ... at least let the receiving application block
                 * delivery of packets with coverage values less than a value
                 * provided by the application."
                 */
                if (up->pcrlen == 0) {          /* full coverage was set  */
                        LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
                                "%d while full coverage %d requested\n",
                                UDP_SKB_CB(skb)->cscov, skb->len);
                        goto drop;
                }
                /* The next case involves violating the min. coverage requested
                 * by the receiver. This is subtle: if receiver wants x and x is
                 * greater than the buffersize/MTU then receiver will complain
                 * that it wants x while sender emits packets of smaller size y.
                 * Therefore the above ...()->partial_cov statement is essential.
                 */
                if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
                        LIMIT_NETDEBUG(KERN_WARNING
                                "UDPLITE: coverage %d too small, need min %d\n",
                                UDP_SKB_CB(skb)->cscov, up->pcrlen);
                        goto drop;
                }
        }

        if (sk->sk_filter) {
                if (udp_lib_checksum_complete(skb))
                        goto drop;
        }

        rc = 0;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
        else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);

        return rc;

drop:
        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        kfree_skb(skb);
        return -1;
}
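
/*
 * Sketch of how an encapsulation user hooks the path above (an assumption
 * drawn from the L2TP/ESP-in-UDP style of use, not code from this file):
 * the owner of a kernel UDP socket installs a handler and a type, after
 * which udp_queue_rcv_skb() diverts matching packets to it.  The handler
 * name is hypothetical.
 *
 *	static int my_encap_rcv(struct sock *sk, struct sk_buff *skb);
 *
 *	udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
 *	udp_sk(sk)->encap_rcv = my_encap_rcv;
 */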
/*
 * Multicasts and broadcasts go to each listener.
 *
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                                    struct udphdr *uh,
                                    __be32 saddr, __be32 daddr,
                                    struct udp_table *udptable)
{
        struct sock *sk;
        struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))];
        int dif;

        spin_lock(&hslot->lock);
        sk = sk_head(&hslot->head);
        dif = skb->dev->ifindex;
        sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
        if (sk) {
                struct sock *sknext = NULL;

                do {
                        struct sk_buff *skb1 = skb;

                        sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
                                                   uh->source, saddr, dif);
                        if (sknext)
                                skb1 = skb_clone(skb, GFP_ATOMIC);

                        if (skb1) {
                                int ret = udp_queue_rcv_skb(sk, skb1);
                                if (ret > 0)
                                        /* we should probably re-process instead
                                         * of dropping packets here. */
                                        kfree_skb(skb1);
                        }
                        sk = sknext;
                } while (sknext);
        } else
                kfree_skb(skb);
        spin_unlock(&hslot->lock);
        return 0;
}
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                                 int proto)
{
        const struct iphdr *iph;
        int err;

        UDP_SKB_CB(skb)->partial_cov = 0;
        UDP_SKB_CB(skb)->cscov = skb->len;

        if (proto == IPPROTO_UDPLITE) {
                err = udplite_checksum_init(skb, uh);
                if (err)
                        return err;
        }

        iph = ip_hdr(skb);
        if (uh->check == 0) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
                if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
                                       proto, skb->csum))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        if (!skb_csum_unnecessary(skb))
                skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                               skb->len, proto, 0);
        /* Probably, we should checksum udp header (it should be in cache
         * in any case) and data in tiny packets (< rx copybreak).
         */

        return 0;
}
/*
 * All we need to do is get the socket, and then do a checksum.
 */
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                   int proto)
{
        struct sock *sk;
        struct udphdr *uh = udp_hdr(skb);
        unsigned short ulen;
        struct rtable *rt = (struct rtable *)skb->dst;
        __be32 saddr = ip_hdr(skb)->saddr;
        __be32 daddr = ip_hdr(skb)->daddr;
        struct net *net = dev_net(skb->dev);

        /*
         * Validate the packet.
         */
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                goto drop;		/* No space for header. */

        ulen = ntohs(uh->len);
        if (ulen > skb->len)
                goto short_packet;

        if (proto == IPPROTO_UDP) {
                /* UDP validates ulen. */
                if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
                        goto short_packet;
                uh = udp_hdr(skb);
        }

        if (udp4_csum_init(skb, uh, proto))
                goto csum_error;

        if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
                return __udp4_lib_mcast_deliver(net, skb, uh,
                                                saddr, daddr, udptable);

        sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);

        if (sk != NULL) {
                int ret = udp_queue_rcv_skb(sk, skb);
                sock_put(sk);

                /* a return value > 0 means to resubmit the input, but
                 * it wants the return to be -protocol, or 0
                 */
                if (ret > 0)
                        return -ret;
                return 0;
        }

        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto drop;
        nf_reset(skb);

        /* No socket. Drop packet silently, if checksum is wrong */
        if (udp_lib_checksum_complete(skb))
                goto csum_error;

        UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

        /*
         * Hmm.  We got a UDP packet to a port to which we
         * don't wanna listen.  Ignore it.
         */
        kfree_skb(skb);
        return 0;

short_packet:
        LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From " NIPQUAD_FMT ":%u %d/%d to " NIPQUAD_FMT ":%u\n",
                       proto == IPPROTO_UDPLITE ? "-Lite" : "",
                       NIPQUAD(saddr), ntohs(uh->source), ulen, skb->len,
                       NIPQUAD(daddr), ntohs(uh->dest));
        goto drop;

csum_error:
        /*
         * RFC1122: OK.  Discards the bad packet silently (as far as
         * the network is concerned, anyway) as per 4.1.3.4 (MUST).
         */
        LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From " NIPQUAD_FMT ":%u to " NIPQUAD_FMT ":%u ulen %d\n",
                       proto == IPPROTO_UDPLITE ? "-Lite" : "",
                       NIPQUAD(saddr), ntohs(uh->source),
                       NIPQUAD(daddr), ntohs(uh->dest), ulen);
drop:
        UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
        kfree_skb(skb);
        return 0;
}
int udp_rcv(struct sk_buff *skb)
{
        return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
        lock_sock(sk);
        udp_flush_pending_frames(sk);
        release_sock(sk);
}
/*
 * Socket option code for UDP
 * (a userspace sketch of the UDP-Lite options follows udp_setsockopt() below)
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
                       char __user *optval, int optlen,
                       int (*push_pending_frames)(struct sock *))
{
        struct udp_sock *up = udp_sk(sk);
        int val;
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        switch (optname) {
        case UDP_CORK:
                if (val != 0) {
                        up->corkflag = 1;
                } else {
                        up->corkflag = 0;
                        lock_sock(sk);
                        (*push_pending_frames)(sk);
                        release_sock(sk);
                }
                break;

        case UDP_ENCAP:
                switch (val) {
                case 0:
                case UDP_ENCAP_ESPINUDP:
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        up->encap_rcv = xfrm4_udp_encap_rcv;
                        /* FALLTHROUGH */
                case UDP_ENCAP_L2TPINUDP:
                        up->encap_type = val;
                        break;
                default:
                        err = -ENOPROTOOPT;
                        break;
                }
                break;

        /*
         * UDP-Lite's partial checksum coverage (RFC 3828).
         */
        /* The sender sets actual checksum coverage length via this option.
         * The case coverage > packet length is handled by send module. */
        case UDPLITE_SEND_CSCOV:
                if (!is_udplite)         /* Disable the option on UDP sockets */
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
                        val = 8;
                else if (val > USHORT_MAX)
                        val = USHORT_MAX;
                up->pcslen = val;
                up->pcflag |= UDPLITE_SEND_CC;
                break;

        /* The receiver specifies a minimum checksum coverage value. To make
         * sense, this should be set to at least 8 (as done below). If zero is
         * used, this again means full checksum coverage. */
        case UDPLITE_RECV_CSCOV:
                if (!is_udplite)         /* Disable the option on UDP sockets */
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Avoid silly minimal values. */
                        val = 8;
                else if (val > USHORT_MAX)
                        val = USHORT_MAX;
                up->pcrlen = val;
                up->pcflag |= UDPLITE_RECV_CC;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

        return err;
}
int udp_setsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
                                          udp_push_pending_frames);
        return ip_setsockopt(sk, level, optname, optval, optlen);
}
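
/*
 * Userspace sketch (illustrative): a UDP-Lite sender/receiver sets its
 * checksum coverage through the options handled above.  IPPROTO_UDPLITE
 * is 136, and UDPLITE_SEND_CSCOV / UDPLITE_RECV_CSCOV (10 and 11) come
 * from <linux/udp.h>.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int send_cov = 20;	// checksum the first 20 bytes only
 *	int recv_cov = 20;	// refuse datagrams covering less than 20
 *
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
 *		   &send_cov, sizeof(send_cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
 *		   &recv_cov, sizeof(recv_cov));
 */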
#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
                                          udp_push_pending_frames);
        return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                       char __user *optval, int __user *optlen)
{
        struct udp_sock *up = udp_sk(sk);
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        len = min_t(unsigned int, len, sizeof(int));

        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case UDP_CORK:
                val = up->corkflag;
                break;

        case UDP_ENCAP:
                val = up->encap_type;
                break;

        /* The following two cannot be changed on UDP sockets, the return is
         * always 0 (which corresponds to the full checksum coverage of UDP). */
        case UDPLITE_SEND_CSCOV:
                val = up->pcslen;
                break;

        case UDPLITE_RECV_CSCOV:
                val = up->pcrlen;
                break;

        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;
        return 0;
}

int udp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
        return ip_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
        return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, select() could report
 *	data available and the subsequent read would then block.  Add
 *	special-case code to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        unsigned int mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
        int is_lite = IS_UDPLITE(sk);

        /* Check for false positives due to checksum errors */
        if ((mask & POLLRDNORM) &&
            !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN)) {
                struct sk_buff_head *rcvq = &sk->sk_receive_queue;
                struct sk_buff *skb;

                spin_lock_bh(&rcvq->lock);
                while ((skb = skb_peek(rcvq)) != NULL &&
                       udp_lib_checksum_complete(skb)) {
                        UDP_INC_STATS_BH(sock_net(sk),
                                         UDP_MIB_INERRORS, is_lite);
                        __skb_unlink(skb, rcvq);
                        kfree_skb(skb);
                }
                spin_unlock_bh(&rcvq->lock);

                /* nothing to see, move along */
                if (skb == NULL)
                        mask &= ~(POLLIN | POLLRDNORM);
        }

        return mask;
}
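
/*
 * Userspace sketch (illustrative): thanks to the special case above, a
 * blocking reader that saw POLLIN will not then block on a datagram whose
 * checksum proves bad; such datagrams are dropped during poll itself.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		recv(fd, buf, sizeof(buf), 0);	// will not block here
 */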
struct proto udp_prot = {
        .name		   = "UDP",
        .owner		   = THIS_MODULE,
        .close		   = udp_lib_close,
        .connect	   = ip4_datagram_connect,
        .disconnect	   = udp_disconnect,
        .ioctl		   = udp_ioctl,
        .destroy	   = udp_destroy_sock,
        .setsockopt	   = udp_setsockopt,
        .getsockopt	   = udp_getsockopt,
        .sendmsg	   = udp_sendmsg,
        .recvmsg	   = udp_recvmsg,
        .sendpage	   = udp_sendpage,
        .backlog_rcv	   = __udp_queue_rcv_skb,
        .hash		   = udp_lib_hash,
        .unhash		   = udp_lib_unhash,
        .get_port	   = udp_v4_get_port,
        .memory_allocated  = &udp_memory_allocated,
        .sysctl_mem	   = sysctl_udp_mem,
        .sysctl_wmem	   = &sysctl_udp_wmem_min,
        .sysctl_rmem	   = &sysctl_udp_rmem_min,
        .obj_size	   = sizeof(struct udp_sock),
        .slab_flags	   = SLAB_DESTROY_BY_RCU,
        .h.udp_table	   = &udp_table,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udp_setsockopt,
        .compat_getsockopt = compat_udp_getsockopt,
#endif
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
        struct sock *sk;
        struct udp_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);

        for (state->bucket = start; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
                struct hlist_node *node;
                struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
                spin_lock_bh(&hslot->lock);
                sk_for_each(sk, node, &hslot->head) {
                        if (!net_eq(sock_net(sk), net))
                                continue;
                        if (sk->sk_family == state->family)
                                goto found;
                }
                spin_unlock_bh(&hslot->lock);
        }
        sk = NULL;
found:
        return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
        struct udp_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);

        do {
                sk = sk_next(sk);
        } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));

        if (!sk) {
                spin_unlock(&state->udp_table->hash[state->bucket].lock);
                return udp_get_first(seq, state->bucket + 1);
        }
        return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
        struct sock *sk = udp_get_first(seq, 0);

        if (sk)
                while (pos && (sk = udp_get_next(seq, sk)) != NULL)
                        --pos;
        return pos ? NULL : sk;
}
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
        return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *sk;

        if (v == SEQ_START_TOKEN)
                sk = udp_get_idx(seq, 0);
        else
                sk = udp_get_next(seq, v);

        ++*pos;
        return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
        struct udp_iter_state *state = seq->private;

        if (state->bucket < UDP_HTABLE_SIZE)
                spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}
static int udp_seq_open(struct inode *inode, struct file *file)
{
        struct udp_seq_afinfo *afinfo = PDE(inode)->data;
        struct udp_iter_state *s;
        int err;

        err = seq_open_net(inode, file, &afinfo->seq_ops,
                           sizeof(struct udp_iter_state));
        if (err < 0)
                return err;

        s = ((struct seq_file *)file->private_data)->private;
        s->family	= afinfo->family;
        s->udp_table	= afinfo->udp_table;
        return err;
}
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
        struct proc_dir_entry *p;
        int rc = 0;

        afinfo->seq_fops.open		= udp_seq_open;
        afinfo->seq_fops.read		= seq_read;
        afinfo->seq_fops.llseek		= seq_lseek;
        afinfo->seq_fops.release	= seq_release_net;

        afinfo->seq_ops.start		= udp_seq_start;
        afinfo->seq_ops.next		= udp_seq_next;
        afinfo->seq_ops.stop		= udp_seq_stop;

        p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
                             &afinfo->seq_fops, afinfo);
        if (!p)
                rc = -ENOMEM;
        return rc;
}

void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
        proc_net_remove(net, afinfo->name);
}
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
                             int bucket, int *len)
{
        struct inet_sock *inet = inet_sk(sp);
        __be32 dest = inet->daddr;
        __be32 src = inet->rcv_saddr;
        __u16 destp = ntohs(inet->dport);
        __u16 srcp = ntohs(inet->sport);

        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
                bucket, src, srcp, dest, destp, sp->sk_state,
                atomic_read(&sp->sk_wmem_alloc),
                atomic_read(&sp->sk_rmem_alloc),
                0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
                atomic_read(&sp->sk_drops), len);
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-127s\n",
                           "  sl  local_address rem_address   st tx_queue "
                           "rx_queue tr tm->when retrnsmt   uid  timeout "
                           "inode ref pointer drops");
        else {
                struct udp_iter_state *state = seq->private;
                int len;

                udp4_format_sock(v, seq, state->bucket, &len);
                seq_printf(seq, "%*s\n", 127 - len, "");
        }
        return 0;
}
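
/*
 * Illustrative /proc/net/udp output produced by the code above (values
 * are made up; addresses and ports are hexadecimal, e.g. 0035 = port 53):
 *
 *    sl  local_address rem_address   st tx_queue rx_queue ...   inode ref pointer drops
 *   140: 00000000:0035 00000000:0000 07 00000000:00000000 ...    4711   2 c0ffee00     0
 */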
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
        .name		= "udp",
        .family		= AF_INET,
        .udp_table	= &udp_table,
        .seq_fops	= {
                .owner	= THIS_MODULE,
        },
        .seq_ops	= {
                .show	= udp4_seq_show,
        },
};

static int udp4_proc_init_net(struct net *net)
{
        return udp_proc_register(net, &udp4_seq_afinfo);
}

static void udp4_proc_exit_net(struct net *net)
{
        udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
        .init = udp4_proc_init_net,
        .exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
        return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
        unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
void __init udp_table_init(struct udp_table *table)
{
        int i;

        for (i = 0; i < UDP_HTABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&table->hash[i].head);
                spin_lock_init(&table->hash[i].lock);
        }
}
void __init udp_init(void)
{
        unsigned long limit;

        udp_table_init(&udp_table);
        /* Set the pressure threshold using the same strategy as TCP: a
         * fraction of global memory that is up to 1/2 at 256 MB, decreasing
         * toward zero with the amount of memory, with a floor of 128 pages.
         */
        limit = min(nr_all_pages, 1UL << (28 - PAGE_SHIFT)) >> (20 - PAGE_SHIFT);
        limit = (limit * (nr_all_pages >> (20 - PAGE_SHIFT))) >> (PAGE_SHIFT - 11);
        limit = max(limit, 128UL);
        sysctl_udp_mem[0] = limit / 4 * 3;
        sysctl_udp_mem[1] = limit;
        sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

        sysctl_udp_rmem_min = SK_MEM_QUANTUM;
        sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}
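
/*
 * Worked example of the sizing above (numbers are illustrative, assuming
 * 4 KB pages, PAGE_SHIFT = 12, and nr_all_pages = 262144, i.e. roughly
 * 1 GB of RAM):
 *
 *	limit = min(262144, 1 << 16) >> 8           = 256
 *	limit = (256 * (262144 >> 8)) >> (12 - 11)  = 131072 pages
 *	limit = max(131072, 128)                    = 131072
 *
 * giving sysctl_udp_mem = { 98304, 131072, 196608 } pages.
 */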
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
EXPORT_SYMBOL(udp_lib_getsockopt);
EXPORT_SYMBOL(udp_lib_setsockopt);
EXPORT_SYMBOL(udp_poll);
EXPORT_SYMBOL(udp_lib_get_port);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif