net: Use hton[sl]() instead of __constant_hton[sl]() where applicable
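(Illustrative aside, not part of the diff below: htons()/htonl() already fold
constant arguments at compile time, so in ordinary expressions the plain forms
are preferred; the __constant_ variants are only needed where C requires an
integer constant expression, such as a static initializer or a switch case
label. A minimal sketch, with a made-up function name:)

	#include <linux/if_ether.h>	/* ETH_P_IP */
	#include <linux/skbuff.h>

	/* hypothetical example, not from this patch */
	static bool example_is_ipv4(const struct sk_buff *skb)
	{
		/* htons() on a constant compiles to the same code as
		 * __constant_htons(), so the plain form is preferred here.
		 */
		return skb->protocol == htons(ETH_P_IP);
	}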
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 25d9e98..771551d 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
 
 #include <net/net_namespace.h>
 #include <net/ip.h>
+#ifdef CONFIG_IP_VS_IPV6
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#endif
 #include <net/route.h>
 #include <net/sock.h>
 #include <net/genetlink.h>
@@ -91,6 +95,26 @@ int ip_vs_get_debug_level(void)
 }
 #endif
 
+#ifdef CONFIG_IP_VS_IPV6
+/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
+static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
+{
+       struct rt6_info *rt;
+       struct flowi fl = {
+               .oif = 0,
+               .nl_u = {
+                       .ip6_u = {
+                               .daddr = *addr,
+                               .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
+       };
+
+       rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+       if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
+                       return 1;
+
+       return 0;
+}
+#endif
 /*
  *     update_defense_level is called from keventd and from sysctl,
  *     so it needs to protect itself from softirqs
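(Aside on the "is there a better way?" question above: one possible
alternative, sketched here and not part of this patch, would be
ipv6_chk_addr() from <net/addrconf.h>, which asks whether the address is
configured on a local interface instead of doing a route lookup. Older
kernels declare its address argument without const, hence the cast; the
_alt name is made up.)

	#include <net/addrconf.h>

	/* hypothetical replacement for __ip_vs_addr_is_local_v6() */
	static int __ip_vs_addr_is_local_v6_alt(const struct in6_addr *addr)
	{
		/* dev == NULL, strict == 0: match on any device in init_net */
		return ipv6_chk_addr(&init_net,
				     (struct in6_addr *) addr, NULL, 0) ? 1 : 0;
	}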
@@ -720,18 +744,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats)
 {
        spin_lock_bh(&stats->lock);
 
-       stats->conns = 0;
-       stats->inpkts = 0;
-       stats->outpkts = 0;
-       stats->inbytes = 0;
-       stats->outbytes = 0;
-
-       stats->cps = 0;
-       stats->inpps = 0;
-       stats->outpps = 0;
-       stats->inbps = 0;
-       stats->outbps = 0;
-
+       memset(&stats->ustats, 0, sizeof(stats->ustats));
        ip_vs_zero_estimator(stats);
 
        spin_unlock_bh(&stats->lock);
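(The one-line memset relies on the user-visible counters now being grouped
into an embedded 'ustats' member. That struct change is not visible in this
file; the layout implied by the rest of the patch is roughly the sketch
below, with other members, e.g. estimator linkage, omitted. The same grouping
is what lets ip_vs_copy_stats() further down replace the pointer-arithmetic
memcpy with a plain copy of src->ustats.)

	/* assumed layout, sketch only */
	struct ip_vs_stats {
		struct ip_vs_stats_user	ustats;	/* conns, in/outpkts,
						   in/outbytes, cps,
						   in/outpps, in/outbps */
		spinlock_t		lock;	/* serializes updates */
	};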
@@ -751,10 +764,18 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
        conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE;
 
        /* check if local node and update the flags */
-       if (inet_addr_type(&init_net, udest->addr.ip) == RTN_LOCAL) {
-               conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
-                       | IP_VS_CONN_F_LOCALNODE;
-       }
+#ifdef CONFIG_IP_VS_IPV6
+       if (svc->af == AF_INET6) {
+               if (__ip_vs_addr_is_local_v6(&udest->addr.in6)) {
+                       conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
+                               | IP_VS_CONN_F_LOCALNODE;
+               }
+       } else
+#endif
+               if (inet_addr_type(&init_net, udest->addr.ip) == RTN_LOCAL) {
+                       conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
+                               | IP_VS_CONN_F_LOCALNODE;
+               }
 
        /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
        if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) {
@@ -803,9 +824,20 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 
        EnterFunction(2);
 
-       atype = inet_addr_type(&init_net, udest->addr.ip);
-       if (atype != RTN_LOCAL && atype != RTN_UNICAST)
-               return -EINVAL;
+#ifdef CONFIG_IP_VS_IPV6
+       if (svc->af == AF_INET6) {
+               atype = ipv6_addr_type(&udest->addr.in6);
+               if ((!(atype & IPV6_ADDR_UNICAST) ||
+                       atype & IPV6_ADDR_LINKLOCAL) &&
+                       !__ip_vs_addr_is_local_v6(&udest->addr.in6))
+                       return -EINVAL;
+       } else
+#endif
+       {
+               atype = inet_addr_type(&init_net, udest->addr.ip);
+               if (atype != RTN_LOCAL && atype != RTN_UNICAST)
+                       return -EINVAL;
+       }
 
        dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
        if (dest == NULL) {
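(The IPv6 validity test above is a double negative; restated positively, a
destination address is accepted if it is unicast and not link-local, or if it
is configured locally, which covers ::1. Illustrative helper only, with a
made-up name, assuming CONFIG_IP_VS_IPV6:)

	static bool ip_vs_v6_dest_addr_ok(const struct in6_addr *addr)
	{
		int t = ipv6_addr_type(addr);	/* bitmask from <net/ipv6.h> */

		if ((t & IPV6_ADDR_UNICAST) && !(t & IPV6_ADDR_LINKLOCAL))
			return true;			/* e.g. 2001:db8::2 */
		return __ip_vs_addr_is_local_v6(addr);	/* e.g. ::1 */
	}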
@@ -882,13 +914,14 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
        dest = ip_vs_trash_get_dest(svc, &daddr, dport);
 
        if (dest != NULL) {
-               IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, "
-                         "dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n",
-                         NIPQUAD(daddr), ntohs(dport),
-                         atomic_read(&dest->refcnt),
-                         dest->vfwmark,
-                         NIPQUAD(dest->vaddr.ip),
-                         ntohs(dest->vport));
+               IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
+                             "dest->refcnt=%d, service %u/%s:%u\n",
+                             IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport),
+                             atomic_read(&dest->refcnt),
+                             dest->vfwmark,
+                             IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
+                             ntohs(dest->vport));
+
                __ip_vs_update_dest(svc, dest, udest);
 
                /*
@@ -1034,10 +1067,11 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
                atomic_dec(&dest->svc->refcnt);
                kfree(dest);
        } else {
-               IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, "
-                         "dest->refcnt=%d\n",
-                         NIPQUAD(dest->addr.ip), ntohs(dest->port),
-                         atomic_read(&dest->refcnt));
+               IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
+                             "dest->refcnt=%d\n",
+                             IP_VS_DBG_ADDR(dest->af, &dest->addr),
+                             ntohs(dest->port),
+                             atomic_read(&dest->refcnt));
                list_add(&dest->n_list, &ip_vs_dest_trash);
                atomic_inc(&dest->refcnt);
        }
@@ -1133,6 +1167,19 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
                goto out_mod_dec;
        }
 
+#ifdef CONFIG_IP_VS_IPV6
+       if (u->af == AF_INET6) {
+               if (!sched->supports_ipv6) {
+                       ret = -EAFNOSUPPORT;
+                       goto out_err;
+               }
+               if ((u->netmask < 1) || (u->netmask > 128)) {
+                       ret = -EINVAL;
+                       goto out_err;
+               }
+       }
+#endif
+
        svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
        if (svc == NULL) {
                IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
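(For AF_INET6 the netmask field is interpreted as a CIDR prefix length, hence
the 1..128 range check, rather than a dotted IPv4 mask; the supports_ipv6
flag is assumed to be added to struct ip_vs_scheduler elsewhere in this
series. A sketch of how such a prefix length can be applied, using
ipv6_addr_prefix() from <net/ipv6.h>; the helper name and its use here are
illustrative, not from this patch:)

	static void example_mask_v6(struct in6_addr *masked,
				    const struct in6_addr *addr, __u32 plen)
	{
		/* keep the top 'plen' bits of 'addr', zero the rest */
		ipv6_addr_prefix(masked, addr, plen);
	}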
@@ -1170,7 +1217,10 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
                atomic_inc(&ip_vs_nullsvc_counter);
 
        ip_vs_new_estimator(&svc->stats);
-       ip_vs_num_services++;
+
+       /* Count only IPv4 services for old get/setsockopt interface */
+       if (svc->af == AF_INET)
+               ip_vs_num_services++;
 
        /* Hash the service into the service table */
        write_lock_bh(&__ip_vs_svc_lock);
@@ -1221,6 +1271,19 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
        }
        old_sched = sched;
 
+#ifdef CONFIG_IP_VS_IPV6
+       if (u->af == AF_INET6) {
+               if (!sched->supports_ipv6) {
+                       ret = -EAFNOSUPPORT;
+                       goto out;
+               }
+               if ((u->netmask < 1) || (u->netmask > 128)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+#endif
+
        write_lock_bh(&__ip_vs_svc_lock);
 
        /*
@@ -1242,7 +1305,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
                 */
                if ((ret = ip_vs_unbind_scheduler(svc))) {
                        old_sched = sched;
-                       goto out;
+                       goto out_unlock;
                }
 
                /*
@@ -1261,12 +1324,13 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
                         */
                        ip_vs_bind_scheduler(svc, old_sched);
                        old_sched = sched;
-                       goto out;
+                       goto out_unlock;
                }
        }
 
-  out:
+  out_unlock:
        write_unlock_bh(&__ip_vs_svc_lock);
+  out:
 
        if (old_sched)
                ip_vs_scheduler_put(old_sched);
@@ -1285,7 +1349,10 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
        struct ip_vs_dest *dest, *nxt;
        struct ip_vs_scheduler *old_sched;
 
-       ip_vs_num_services--;
+       /* Count only IPv4 services for old get/setsockopt interface */
+       if (svc->af == AF_INET)
+               ip_vs_num_services--;
+
        ip_vs_kill_estimator(&svc->stats);
 
        /* Unbind scheduler */
@@ -1720,6 +1787,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
 }
 
 static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos)
+__acquires(__ip_vs_svc_lock)
 {
 
        read_lock_bh(&__ip_vs_svc_lock);
@@ -1773,6 +1841,7 @@ static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void ip_vs_info_seq_stop(struct seq_file *seq, void *v)
+__releases(__ip_vs_svc_lock)
 {
        read_unlock_bh(&__ip_vs_svc_lock);
 }
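(The __acquires()/__releases() annotations generate no code; they only tell
sparse ("make C=1") that the seq_file start/stop callbacks hand the read lock
across function boundaries. For reference, include/linux/compiler.h defines
them roughly as follows:)

	#ifdef __CHECKER__
	# define __acquires(x)	__attribute__((context(x,0,1)))
	# define __releases(x)	__attribute__((context(x,1,0)))
	#else
	# define __acquires(x)
	# define __releases(x)
	#endif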
@@ -1887,20 +1956,20 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
                   "   Conns  Packets  Packets            Bytes            Bytes\n");
 
        spin_lock_bh(&ip_vs_stats.lock);
-       seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.conns,
-                  ip_vs_stats.inpkts, ip_vs_stats.outpkts,
-                  (unsigned long long) ip_vs_stats.inbytes,
-                  (unsigned long long) ip_vs_stats.outbytes);
+       seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
+                  ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
+                  (unsigned long long) ip_vs_stats.ustats.inbytes,
+                  (unsigned long long) ip_vs_stats.ustats.outbytes);
 
 /*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
        seq_puts(seq,
                   " Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
        seq_printf(seq,"%8X %8X %8X %16X %16X\n",
-                       ip_vs_stats.cps,
-                       ip_vs_stats.inpps,
-                       ip_vs_stats.outpps,
-                       ip_vs_stats.inbps,
-                       ip_vs_stats.outbps);
+                       ip_vs_stats.ustats.cps,
+                       ip_vs_stats.ustats.inpps,
+                       ip_vs_stats.ustats.outpps,
+                       ip_vs_stats.ustats.inbps,
+                       ip_vs_stats.ustats.outbps);
        spin_unlock_bh(&ip_vs_stats.lock);
 
        return 0;
@@ -2138,7 +2207,7 @@ static void
 ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
 {
        spin_lock_bh(&src->lock);
-       memcpy(dst, src, (char*)&src->lock - (char*)src);
+       memcpy(dst, &src->ustats, sizeof(*dst));
        spin_unlock_bh(&src->lock);
 }
 
@@ -2168,6 +2237,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
 
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
+                       /* Only expose IPv4 entries to old interface */
+                       if (svc->af != AF_INET)
+                               continue;
+
                        if (count >= get->num_services)
                                goto out;
                        memset(&entry, 0, sizeof(entry));
@@ -2183,6 +2256,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
 
        for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
                list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
+                       /* Only expose IPv4 entries to old interface */
+                       if (svc->af != AF_INET)
+                               continue;
+
                        if (count >= get->num_services)
                                goto out;
                        memset(&entry, 0, sizeof(entry));
@@ -2506,16 +2583,16 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
 
        spin_lock_bh(&stats->lock);
 
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->conns);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->inpkts);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->outpkts);
-       NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->inbytes);
-       NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->outbytes);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->cps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->inpps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->outpps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->inbps);
-       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->outbps);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->ustats.conns);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->ustats.inpkts);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->ustats.outpkts);
+       NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->ustats.inbytes);
+       NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->ustats.outbytes);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->ustats.cps);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->ustats.inpps);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->ustats.outpps);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->ustats.inbps);
+       NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->ustats.outbps);
 
        spin_unlock_bh(&stats->lock);
 
@@ -2540,7 +2617,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
        if (!nl_service)
                return -EMSGSIZE;
 
-       NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, AF_INET);
+       NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af);
 
        if (svc->fwmark) {
                NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
@@ -2647,8 +2724,11 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
                return -EINVAL;
 
        usvc->af = nla_get_u16(nla_af);
-       /* For now, only support IPv4 */
-       if (nla_get_u16(nla_af) != AF_INET)
+#ifdef CONFIG_IP_VS_IPV6
+       if (usvc->af != AF_INET && usvc->af != AF_INET6)
+#else
+       if (usvc->af != AF_INET)
+#endif
                return -EAFNOSUPPORT;
 
        if (nla_fwmark) {