ipv6: Fix conflict resolutions during ipv6 binding
[safe/jmp/linux-2.6] / net / ipv4 / udp.c
index c97da53..ace2ac8 100644 (file)
@@ -8,7 +8,7 @@
  * Authors:    Ross Biro
  *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *             Alan Cox, <Alan.Cox@linux.org>
+ *             Alan Cox, <alan@lxorguk.ukuu.org.uk>
  *             Hirokazu Takahashi, <taka@valinux.co.jp>
  *
  * Fixes:
@@ -81,6 +81,8 @@
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
 #include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/module.h>
 #include <net/xfrm.h>
 #include "udp_impl.h"
 
-/*
- *     Snmp MIB for the UDP layer
- */
-
-DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
-EXPORT_SYMBOL(udp_statistics);
-
-DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
-EXPORT_SYMBOL(udp_stats_in6);
-
-struct hlist_head udp_hash[UDP_HTABLE_SIZE];
-DEFINE_RWLOCK(udp_hash_lock);
+struct udp_table udp_table;
+EXPORT_SYMBOL(udp_table);
 
 int sysctl_udp_mem[3] __read_mostly;
 int sysctl_udp_rmem_min __read_mostly;
@@ -128,15 +120,32 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
 atomic_t udp_memory_allocated;
 EXPORT_SYMBOL(udp_memory_allocated);
 
-static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
-                                       const struct hlist_head udptable[])
-{
-       struct sock *sk;
-       struct hlist_node *node;
+#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
 
-       sk_for_each(sk, node, &udptable[udp_hashfn(net, num)])
-               if (net_eq(sock_net(sk), net) && sk->sk_hash == num)
-                       return 1;
+static int udp_lib_lport_inuse(struct net *net, __u16 num,
+                              const struct udp_hslot *hslot,
+                              unsigned long *bitmap,
+                              struct sock *sk,
+                              int (*saddr_comp)(const struct sock *sk1,
+                                                const struct sock *sk2))
+{
+       struct sock *sk2;
+       struct hlist_nulls_node *node;
+
+       sk_nulls_for_each(sk2, node, &hslot->head)
+               if (net_eq(sock_net(sk2), net)                  &&
+                   sk2 != sk                                   &&
+                   (bitmap || sk2->sk_hash == num)             &&
+                   (!sk2->sk_reuse || !sk->sk_reuse)           &&
+                   (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
+                       || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
+                   (*saddr_comp)(sk, sk2)) {
+                       if (bitmap)
+                               __set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
+                                         bitmap);
+                       else
+                               return 1;
+               }
        return 0;
 }
 
@@ -151,93 +160,69 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                       int (*saddr_comp)(const struct sock *sk1,
                                         const struct sock *sk2 )    )
 {
-       struct hlist_head *udptable = sk->sk_prot->h.udp_hash;
-       struct hlist_node *node;
-       struct hlist_head *head;
-       struct sock *sk2;
+       struct udp_hslot *hslot;
+       struct udp_table *udptable = sk->sk_prot->h.udp_table;
        int    error = 1;
        struct net *net = sock_net(sk);
 
-       write_lock_bh(&udp_hash_lock);
-
        if (!snum) {
-               int i, low, high, remaining;
-               unsigned rover, best, best_size_so_far;
+               int low, high, remaining;
+               unsigned rand;
+               unsigned short first, last;
+               DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
                inet_get_local_port_range(&low, &high);
                remaining = (high - low) + 1;
 
-               best_size_so_far = UINT_MAX;
-               best = rover = net_random() % remaining + low;
-
-               /* 1st pass: look for empty (or shortest) hash chain */
-               for (i = 0; i < UDP_HTABLE_SIZE; i++) {
-                       int size = 0;
-
-                       head = &udptable[udp_hashfn(net, rover)];
-                       if (hlist_empty(head))
-                               goto gotit;
-
-                       sk_for_each(sk2, node, head) {
-                               if (++size >= best_size_so_far)
-                                       goto next;
-                       }
-                       best_size_so_far = size;
-                       best = rover;
-               next:
-                       /* fold back if end of range */
-                       if (++rover > high)
-                               rover = low + ((rover - low)
-                                              & (UDP_HTABLE_SIZE - 1));
-
-
-               }
-
-               /* 2nd pass: find hole in shortest hash chain */
-               rover = best;
-               for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++) {
-                       if (! __udp_lib_lport_inuse(net, rover, udptable))
-                               goto gotit;
-                       rover += UDP_HTABLE_SIZE;
-                       if (rover > high)
-                               rover = low + ((rover - low)
-                                              & (UDP_HTABLE_SIZE - 1));
+               rand = net_random();
+               first = (((u64)rand * remaining) >> 32) + low;
+               /*
+                * force rand to be an odd multiple of UDP_HTABLE_SIZE
+                */
+               rand = (rand | 1) * UDP_HTABLE_SIZE;
+               for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
+                       hslot = &udptable->hash[udp_hashfn(net, first)];
+                       bitmap_zero(bitmap, PORTS_PER_CHAIN);
+                       spin_lock_bh(&hslot->lock);
+                       udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
+                                           saddr_comp);
+
+                       snum = first;
+                       /*
+                        * Iterate on all possible values of snum for this hash.
+                        * Using steps of an odd multiple of UDP_HTABLE_SIZE
+                        * gives us randomization and full range coverage.
+                        */
+                       do {
+                               if (low <= snum && snum <= high &&
+                                   !test_bit(snum / UDP_HTABLE_SIZE, bitmap))
+                                       goto found;
+                               snum += rand;
+                       } while (snum != first);
+                       spin_unlock_bh(&hslot->lock);
                }
-
-
-               /* All ports in use! */
                goto fail;
-
-gotit:
-               snum = rover;
        } else {
-               head = &udptable[udp_hashfn(net, snum)];
-
-               sk_for_each(sk2, node, head)
-                       if (sk2->sk_hash == snum                             &&
-                           sk2 != sk                                        &&
-                           net_eq(sock_net(sk2), net)                       &&
-                           (!sk2->sk_reuse        || !sk->sk_reuse)         &&
-                           (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
-                            || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
-                           (*saddr_comp)(sk, sk2)                             )
-                               goto fail;
+               hslot = &udptable->hash[udp_hashfn(net, snum)];
+               spin_lock_bh(&hslot->lock);
+               if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
+                       goto fail_unlock;
        }
-
+found:
        inet_sk(sk)->num = snum;
        sk->sk_hash = snum;
        if (sk_unhashed(sk)) {
-               head = &udptable[udp_hashfn(net, snum)];
-               sk_add_node(sk, head);
+               sk_nulls_add_node_rcu(sk, &hslot->head);
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        }
        error = 0;
+fail_unlock:
+       spin_unlock_bh(&hslot->lock);
 fail:
-       write_unlock_bh(&udp_hash_lock);
        return error;
 }
 
-static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
 {
        struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
 
@@ -251,73 +236,124 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
        return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal);
 }
 
+static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
+                        unsigned short hnum,
+                        __be16 sport, __be32 daddr, __be16 dport, int dif)
+{
+       int score = -1;
+
+       if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
+                       !ipv6_only_sock(sk)) {
+               struct inet_sock *inet = inet_sk(sk);
+
+               score = (sk->sk_family == PF_INET ? 1 : 0);
+               if (inet->rcv_saddr) {
+                       if (inet->rcv_saddr != daddr)
+                               return -1;
+                       score += 2;
+               }
+               if (inet->daddr) {
+                       if (inet->daddr != saddr)
+                               return -1;
+                       score += 2;
+               }
+               if (inet->dport) {
+                       if (inet->dport != sport)
+                               return -1;
+                       score += 2;
+               }
+               if (sk->sk_bound_dev_if) {
+                       if (sk->sk_bound_dev_if != dif)
+                               return -1;
+                       score += 2;
+               }
+       }
+       return score;
+}
+
 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  * harder than this. -DaveM
  */
 static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
                __be16 sport, __be32 daddr, __be16 dport,
-               int dif, struct hlist_head udptable[])
+               int dif, struct udp_table *udptable)
 {
-       struct sock *sk, *result = NULL;
-       struct hlist_node *node;
+       struct sock *sk, *result;
+       struct hlist_nulls_node *node;
        unsigned short hnum = ntohs(dport);
-       int badness = -1;
-
-       read_lock(&udp_hash_lock);
-       sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) {
-               struct inet_sock *inet = inet_sk(sk);
-
-               if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
-                               !ipv6_only_sock(sk)) {
-                       int score = (sk->sk_family == PF_INET ? 1 : 0);
-                       if (inet->rcv_saddr) {
-                               if (inet->rcv_saddr != daddr)
-                                       continue;
-                               score+=2;
-                       }
-                       if (inet->daddr) {
-                               if (inet->daddr != saddr)
-                                       continue;
-                               score+=2;
-                       }
-                       if (inet->dport) {
-                               if (inet->dport != sport)
-                                       continue;
-                               score+=2;
-                       }
-                       if (sk->sk_bound_dev_if) {
-                               if (sk->sk_bound_dev_if != dif)
-                                       continue;
-                               score+=2;
-                       }
-                       if (score == 9) {
-                               result = sk;
-                               break;
-                       } else if (score > badness) {
-                               result = sk;
-                               badness = score;
-                       }
+       unsigned int hash = udp_hashfn(net, hnum);
+       struct udp_hslot *hslot = &udptable->hash[hash];
+       int score, badness;
+
+       rcu_read_lock();
+begin:
+       result = NULL;
+       badness = -1;
+       sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+               score = compute_score(sk, net, saddr, hnum, sport,
+                                     daddr, dport, dif);
+               if (score > badness) {
+                       result = sk;
+                       badness = score;
                }
        }
-       if (result)
-               sock_hold(result);
-       read_unlock(&udp_hash_lock);
+       /*
+        * if the nulls value we got at the end of this lookup is
+        * not the expected one, we must restart lookup.
+        * We probably met an item that was moved to another chain.
+        */
+       if (get_nulls_value(node) != hash)
+               goto begin;
+
+       if (result) {
+               if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+                       result = NULL;
+               else if (unlikely(compute_score(result, net, saddr, hnum, sport,
+                                 daddr, dport, dif) < badness)) {
+                       sock_put(result);
+                       goto begin;
+               }
+       }
+       rcu_read_unlock();
        return result;
 }
 
-static inline struct sock *udp_v4_mcast_next(struct sock *sk,
+static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
+                                                __be16 sport, __be16 dport,
+                                                struct udp_table *udptable)
+{
+       struct sock *sk;
+       const struct iphdr *iph = ip_hdr(skb);
+
+       if (unlikely(sk = skb_steal_sock(skb)))
+               return sk;
+       else
+               return __udp4_lib_lookup(dev_net(skb->dst->dev), iph->saddr, sport,
+                                        iph->daddr, dport, inet_iif(skb),
+                                        udptable);
+}
+
+struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+                            __be32 daddr, __be16 dport, int dif)
+{
+       return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
+}
+EXPORT_SYMBOL_GPL(udp4_lib_lookup);
+
+static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
                                             __be16 loc_port, __be32 loc_addr,
                                             __be16 rmt_port, __be32 rmt_addr,
                                             int dif)
 {
-       struct hlist_node *node;
+       struct hlist_nulls_node *node;
        struct sock *s = sk;
        unsigned short hnum = ntohs(loc_port);
 
-       sk_for_each_from(s, node) {
+       sk_nulls_for_each_from(s, node) {
                struct inet_sock *inet = inet_sk(s);
 
-               if (s->sk_hash != hnum                                  ||
+               if (!net_eq(sock_net(s), net)                           ||
+                   s->sk_hash != hnum                                  ||
                    (inet->daddr && inet->daddr != rmt_addr)            ||
                    (inet->dport != rmt_port && inet->dport)            ||
                    (inet->rcv_saddr && inet->rcv_saddr != loc_addr)    ||
@@ -344,7 +380,7 @@ found:
  * to find the appropriate port.
  */
 
-void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
+void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 {
        struct inet_sock *inet;
        struct iphdr *iph = (struct iphdr*)skb->data;
@@ -354,11 +390,12 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
        struct sock *sk;
        int harderr;
        int err;
+       struct net *net = dev_net(skb->dev);
 
-       sk = __udp4_lib_lookup(dev_net(skb->dev), iph->daddr, uh->dest,
+       sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
                        iph->saddr, uh->source, skb->dev->ifindex, udptable);
        if (sk == NULL) {
-               ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return; /* No socket for error */
        }
 
@@ -412,7 +449,7 @@ out:
 
 void udp_err(struct sk_buff *skb, u32 info)
 {
-       __udp4_lib_err(skb, info, udp_hash);
+       __udp4_lib_err(skb, info, &udp_table);
 }
 
 /*
@@ -559,6 +596,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                return -EOPNOTSUPP;
 
        ipc.opt = NULL;
+       ipc.shtx.flags = 0;
 
        if (up->pending) {
                /*
@@ -606,6 +644,9 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.addr = inet->saddr;
 
        ipc.oif = sk->sk_bound_dev_if;
+       err = sock_tx_timestamp(msg, sk, &ipc.shtx);
+       if (err)
+               return err;
        if (msg->msg_controllen) {
                err = ip_cmsg_send(sock_net(sk), msg, &ipc);
                if (err)
@@ -652,14 +693,17 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                                .saddr = saddr,
                                                .tos = tos } },
                                    .proto = sk->sk_protocol,
+                                   .flags = inet_sk_flowi_flags(sk),
                                    .uli_u = { .ports =
                                               { .sport = inet->sport,
                                                 .dport = dport } } };
+               struct net *net = sock_net(sk);
+
                security_sk_classify_flow(sk, &fl);
-               err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
+               err = ip_route_output_flow(net, &rt, &fl, sk, 1);
                if (err) {
                        if (err == -ENETUNREACH)
-                               IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+                               IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
                        goto out;
                }
 
@@ -702,7 +746,7 @@ do_append_data:
        up->len += ulen;
        getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
        err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
-                       sizeof(struct udphdr), &ipc, rt,
+                       sizeof(struct udphdr), &ipc, &rt,
                        corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
        if (err)
                udp_flush_pending_frames(sk);
@@ -951,6 +995,46 @@ int udp_disconnect(struct sock *sk, int flags)
        return 0;
 }
 
+void udp_lib_unhash(struct sock *sk)
+{
+       if (sk_hashed(sk)) {
+               struct udp_table *udptable = sk->sk_prot->h.udp_table;
+               unsigned int hash = udp_hashfn(sock_net(sk), sk->sk_hash);
+               struct udp_hslot *hslot = &udptable->hash[hash];
+
+               spin_lock_bh(&hslot->lock);
+               if (sk_nulls_del_node_init_rcu(sk)) {
+                       inet_sk(sk)->num = 0;
+                       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+               }
+               spin_unlock_bh(&hslot->lock);
+       }
+}
+EXPORT_SYMBOL(udp_lib_unhash);
+
+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+       int is_udplite = IS_UDPLITE(sk);
+       int rc;
+
+       if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+               /* Note that an ENOMEM error is charged twice */
+               if (rc == -ENOMEM) {
+                       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                        is_udplite);
+                       atomic_inc(&sk->sk_drops);
+               }
+               goto drop;
+       }
+
+       return 0;
+
+drop:
+       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+       kfree_skb(skb);
+       return -1;
+}
+
 /* returns:
  *  -1: error
  *   0: success
@@ -991,7 +1075,8 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 
                        ret = (*up->encap_rcv)(sk, skb);
                        if (ret <= 0) {
-                               UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS,
+                               UDP_INC_STATS_BH(sock_net(sk),
+                                                UDP_MIB_INDATAGRAMS,
                                                 is_udplite);
                                return -ret;
                        }
@@ -1041,19 +1126,19 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
-               /* Note that an ENOMEM error is charged twice */
-               if (rc == -ENOMEM) {
-                       UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
-                       atomic_inc(&sk->sk_drops);
-               }
-               goto drop;
-       }
+       rc = 0;
 
-       return 0;
+       bh_lock_sock(sk);
+       if (!sock_owned_by_user(sk))
+               rc = __udp_queue_rcv_skb(sk, skb);
+       else
+               sk_add_backlog(sk, skb);
+       bh_unlock_sock(sk);
+
+       return rc;
 
 drop:
-       UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
+       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        kfree_skb(skb);
        return -1;
 }
@@ -1067,36 +1152,30 @@ drop:
 static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                                    struct udphdr  *uh,
                                    __be32 saddr, __be32 daddr,
-                                   struct hlist_head udptable[])
+                                   struct udp_table *udptable)
 {
        struct sock *sk;
+       struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))];
        int dif;
 
-       read_lock(&udp_hash_lock);
-       sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
+       spin_lock(&hslot->lock);
+       sk = sk_nulls_head(&hslot->head);
        dif = skb->dev->ifindex;
-       sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
+       sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
        if (sk) {
                struct sock *sknext = NULL;
 
                do {
                        struct sk_buff *skb1 = skb;
 
-                       sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
-                                                  uh->source, saddr, dif);
+                       sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
+                                                  daddr, uh->source, saddr,
+                                                  dif);
                        if (sknext)
                                skb1 = skb_clone(skb, GFP_ATOMIC);
 
                        if (skb1) {
-                               int ret = 0;
-
-                               bh_lock_sock_nested(sk);
-                               if (!sock_owned_by_user(sk))
-                                       ret = udp_queue_rcv_skb(sk, skb1);
-                               else
-                                       sk_add_backlog(sk, skb1);
-                               bh_unlock_sock(sk);
-
+                               int ret = udp_queue_rcv_skb(sk, skb1);
                                if (ret > 0)
                                        /* we should probably re-process instead
                                         * of dropping packets here. */
@@ -1105,8 +1184,8 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                        sk = sknext;
                } while (sknext);
        } else
-               kfree_skb(skb);
-       read_unlock(&udp_hash_lock);
+               consume_skb(skb);
+       spin_unlock(&hslot->lock);
        return 0;
 }
 
@@ -1152,16 +1231,15 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
  *     All we need to do is get the socket, and then do a checksum.
  */
 
-int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
+int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                   int proto)
 {
        struct sock *sk;
-       struct udphdr *uh = udp_hdr(skb);
+       struct udphdr *uh;
        unsigned short ulen;
        struct rtable *rt = (struct rtable*)skb->dst;
-       __be32 saddr = ip_hdr(skb)->saddr;
-       __be32 daddr = ip_hdr(skb)->daddr;
-       struct net *net;
+       __be32 saddr, daddr;
+       struct net *net = dev_net(skb->dev);
 
        /*
         *  Validate the packet.
@@ -1169,6 +1247,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                goto drop;              /* No space for header. */
 
+       uh   = udp_hdr(skb);
        ulen = ntohs(uh->len);
        if (ulen > skb->len)
                goto short_packet;
@@ -1183,22 +1262,17 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
        if (udp4_csum_init(skb, uh, proto))
                goto csum_error;
 
-       net = dev_net(skb->dev);
+       saddr = ip_hdr(skb)->saddr;
+       daddr = ip_hdr(skb)->daddr;
+
        if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
                return __udp4_lib_mcast_deliver(net, skb, uh,
                                saddr, daddr, udptable);
 
-       sk = __udp4_lib_lookup(net, saddr, uh->source, daddr,
-                       uh->dest, inet_iif(skb), udptable);
+       sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 
        if (sk != NULL) {
-               int ret = 0;
-               bh_lock_sock_nested(sk);
-               if (!sock_owned_by_user(sk))
-                       ret = udp_queue_rcv_skb(sk, skb);
-               else
-                       sk_add_backlog(sk, skb);
-               bh_unlock_sock(sk);
+               int ret = udp_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
                /* a return value > 0 means to resubmit the input, but
@@ -1217,7 +1291,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
        if (udp_lib_checksum_complete(skb))
                goto csum_error;
 
-       UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+       UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
        /*
@@ -1228,13 +1302,13 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
        return 0;
 
 short_packet:
-       LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From " NIPQUAD_FMT ":%u %d/%d to " NIPQUAD_FMT ":%u\n",
+       LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
                       proto == IPPROTO_UDPLITE ? "-Lite" : "",
-                      NIPQUAD(saddr),
+                      &saddr,
                       ntohs(uh->source),
                       ulen,
                       skb->len,
-                      NIPQUAD(daddr),
+                      &daddr,
                       ntohs(uh->dest));
        goto drop;
 
@@ -1243,22 +1317,22 @@ csum_error:
         * RFC1122: OK.  Discards the bad packet silently (as far as
         * the network is concerned, anyway) as per 4.1.3.4 (MUST).
         */
-       LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From " NIPQUAD_FMT ":%u to " NIPQUAD_FMT ":%u ulen %d\n",
+       LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
                       proto == IPPROTO_UDPLITE ? "-Lite" : "",
-                      NIPQUAD(saddr),
+                      &saddr,
                       ntohs(uh->source),
-                      NIPQUAD(daddr),
+                      &daddr,
                       ntohs(uh->dest),
                       ulen);
 drop:
-       UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
+       UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
        kfree_skb(skb);
        return 0;
 }
 
 int udp_rcv(struct sk_buff *skb)
 {
-       return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
+       return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
 }
 
 void udp_destroy_sock(struct sock *sk)
@@ -1324,6 +1398,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
                        val = 8;
+               else if (val > USHORT_MAX)
+                       val = USHORT_MAX;
                up->pcslen = val;
                up->pcflag |= UDPLITE_SEND_CC;
                break;
@@ -1336,6 +1412,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Avoid silly minimal values.       */
                        val = 8;
+               else if (val > USHORT_MAX)
+                       val = USHORT_MAX;
                up->pcrlen = val;
                up->pcflag |= UDPLITE_RECV_CC;
                break;
@@ -1458,7 +1536,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
                spin_lock_bh(&rcvq->lock);
                while ((skb = skb_peek(rcvq)) != NULL &&
                       udp_lib_checksum_complete(skb)) {
-                       UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
+                       UDP_INC_STATS_BH(sock_net(sk),
+                                       UDP_MIB_INERRORS, is_lite);
                        __skb_unlink(skb, rcvq);
                        kfree_skb(skb);
                }
@@ -1486,7 +1565,7 @@ struct proto udp_prot = {
        .sendmsg           = udp_sendmsg,
        .recvmsg           = udp_recvmsg,
        .sendpage          = udp_sendpage,
-       .backlog_rcv       = udp_queue_rcv_skb,
+       .backlog_rcv       = __udp_queue_rcv_skb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
        .get_port          = udp_v4_get_port,
@@ -1495,7 +1574,8 @@ struct proto udp_prot = {
        .sysctl_wmem       = &sysctl_udp_wmem_min,
        .sysctl_rmem       = &sysctl_udp_rmem_min,
        .obj_size          = sizeof(struct udp_sock),
-       .h.udp_hash        = udp_hash,
+       .slab_flags        = SLAB_DESTROY_BY_RCU,
+       .h.udp_table       = &udp_table,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udp_setsockopt,
        .compat_getsockopt = compat_udp_getsockopt,
@@ -1505,20 +1585,23 @@ struct proto udp_prot = {
 /* ------------------------------------------------------------------------ */
 #ifdef CONFIG_PROC_FS
 
-static struct sock *udp_get_first(struct seq_file *seq)
+static struct sock *udp_get_first(struct seq_file *seq, int start)
 {
        struct sock *sk;
        struct udp_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);
 
-       for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
-               struct hlist_node *node;
-               sk_for_each(sk, node, state->hashtable + state->bucket) {
+       for (state->bucket = start; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
+               struct hlist_nulls_node *node;
+               struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
+               spin_lock_bh(&hslot->lock);
+               sk_nulls_for_each(sk, node, &hslot->head) {
                        if (!net_eq(sock_net(sk), net))
                                continue;
                        if (sk->sk_family == state->family)
                                goto found;
                }
+               spin_unlock_bh(&hslot->lock);
        }
        sk = NULL;
 found:
@@ -1531,21 +1614,19 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
        struct net *net = seq_file_net(seq);
 
        do {
-               sk = sk_next(sk);
-try_again:
-               ;
+               sk = sk_nulls_next(sk);
        } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
 
-       if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
-               sk = sk_head(state->hashtable + state->bucket);
-               goto try_again;
+       if (!sk) {
+               spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
+               return udp_get_first(seq, state->bucket + 1);
        }
        return sk;
 }
 
 static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
 {
-       struct sock *sk = udp_get_first(seq);
+       struct sock *sk = udp_get_first(seq, 0);
 
        if (sk)
                while (pos && (sk = udp_get_next(seq, sk)) != NULL)
@@ -1554,9 +1635,7 @@ static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
 }
 
 static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(udp_hash_lock)
 {
-       read_lock(&udp_hash_lock);
        return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
 }
 
@@ -1574,9 +1653,11 @@ static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void udp_seq_stop(struct seq_file *seq, void *v)
-       __releases(udp_hash_lock)
 {
-       read_unlock(&udp_hash_lock);
+       struct udp_iter_state *state = seq->private;
+
+       if (state->bucket < UDP_HTABLE_SIZE)
+               spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
 }
 
 static int udp_seq_open(struct inode *inode, struct file *file)
@@ -1592,7 +1673,7 @@ static int udp_seq_open(struct inode *inode, struct file *file)
 
        s = ((struct seq_file *)file->private_data)->private;
        s->family               = afinfo->family;
-       s->hashtable            = afinfo->hashtable;
+       s->udp_table            = afinfo->udp_table;
        return err;
 }
 
@@ -1664,7 +1745,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
 static struct udp_seq_afinfo udp4_seq_afinfo = {
        .name           = "udp",
        .family         = AF_INET,
-       .hashtable      = udp_hash,
+       .udp_table      = &udp_table,
        .seq_fops       = {
                .owner  =       THIS_MODULE,
        },
@@ -1699,16 +1780,28 @@ void udp4_proc_exit(void)
 }
 #endif /* CONFIG_PROC_FS */
 
+void __init udp_table_init(struct udp_table *table)
+{
+       int i;
+
+       for (i = 0; i < UDP_HTABLE_SIZE; i++) {
+               INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
+               spin_lock_init(&table->hash[i].lock);
+       }
+}
+
 void __init udp_init(void)
 {
-       unsigned long limit;
+       unsigned long nr_pages, limit;
 
+       udp_table_init(&udp_table);
        /* Set the pressure threshold up by the same strategy of TCP. It is a
         * fraction of global memory that is up to 1/2 at 256 MB, decreasing
         * toward zero with the amount of memory, with a floor of 128 pages.
         */
-       limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-       limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+       nr_pages = totalram_pages - totalhigh_pages;
+       limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
+       limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
        limit = max(limit, 128UL);
        sysctl_udp_mem[0] = limit / 4 * 3;
        sysctl_udp_mem[1] = limit;
@@ -1719,8 +1812,6 @@ void __init udp_init(void)
 }
 
 EXPORT_SYMBOL(udp_disconnect);
-EXPORT_SYMBOL(udp_hash);
-EXPORT_SYMBOL(udp_hash_lock);
 EXPORT_SYMBOL(udp_ioctl);
 EXPORT_SYMBOL(udp_prot);
 EXPORT_SYMBOL(udp_sendmsg);
@@ -1728,6 +1819,7 @@ EXPORT_SYMBOL(udp_lib_getsockopt);
 EXPORT_SYMBOL(udp_lib_setsockopt);
 EXPORT_SYMBOL(udp_poll);
 EXPORT_SYMBOL(udp_lib_get_port);
+EXPORT_SYMBOL(ipv4_rcv_saddr_equal);
 
 #ifdef CONFIG_PROC_FS
 EXPORT_SYMBOL(udp_proc_register);