2 * ip_vs_proto_udp.c: UDP load balancing support for IPVS
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
18 #include <linux/kernel.h>
19 #include <linux/netfilter.h>
20 #include <linux/netfilter_ipv4.h>
21 #include <linux/udp.h>
23 #include <net/ip_vs.h>
26 static struct ip_vs_conn *
/*
 * Look up the IPVS connection entry for an inbound (client->virtual) UDP
 * packet.  proto_off is the byte offset of the UDP header inside skb; the
 * two ports are read from there to key the lookup.
 * NOTE(review): this excerpt is elided (embedded original line numbers show
 * gaps) — the pptr NULL check, the else branch, and the return of cp are
 * not visible here.
 */
27 udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
28 const struct ip_vs_iphdr *iph, unsigned int proto_off,
31 struct ip_vs_conn *cp;
32 __be16 _ports[2], *pptr;
/* Copy both UDP ports out of the (possibly non-linear) skb. */
34 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
38 if (likely(!inverse)) {
/* Normal direction: match on source addr/port -> daddr/pptr[1]. */
39 cp = ip_vs_conn_in_get(af, iph->protocol,
41 &iph->daddr, pptr[1]);
/* Inverse direction: roles of source and destination swapped. */
43 cp = ip_vs_conn_in_get(af, iph->protocol,
45 &iph->saddr, pptr[0]);
52 static struct ip_vs_conn *
/*
 * Look up the IPVS connection entry for an outbound (real-server->client)
 * UDP packet; mirror of udp_conn_in_get() using ip_vs_conn_out_get().
 * NOTE(review): elided excerpt — the pptr NULL check, else branch and
 * return statement are not visible.
 */
53 udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
54 const struct ip_vs_iphdr *iph, unsigned int proto_off,
57 struct ip_vs_conn *cp;
58 __be16 _ports[2], *pptr;
/* Pull the UDP port pair out of the skb at the transport offset. */
60 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
64 if (likely(!inverse)) {
65 cp = ip_vs_conn_out_get(af, iph->protocol,
67 &iph->daddr, pptr[1]);
69 cp = ip_vs_conn_out_get(af, iph->protocol,
71 &iph->saddr, pptr[0]);
/*
 * Schedule a new connection for the first packet of a UDP flow: find the
 * virtual service matching (af, fwmark, proto, daddr, dport) and let it
 * pick a real server, creating the connection entry in *cpp.  *verdict is
 * set when the caller should not continue normal processing.
 * NOTE(review): elided excerpt — the NULL checks on uh/svc, the
 * sysctl-drop test guarding the "very loaded" path, and the return
 * statements are not visible.
 */
79 udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
80 int *verdict, struct ip_vs_conn **cpp)
82 struct ip_vs_service *svc;
83 struct udphdr _udph, *uh;
84 struct ip_vs_iphdr iph;
86 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
/* UDP header sits right after the network header (iph.len bytes in). */
88 uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph);
/* Service lookup keyed by dest addr + dest port (and skb->mark). */
94 svc = ip_vs_service_get(af, skb->mark, iph.protocol,
95 &iph.daddr, uh->dest);
99 * It seems that we are very loaded.
100 * We have to drop this packet :(
/* Drop path: release the service reference taken above. */
102 ip_vs_service_put(svc);
108 * Let the virtual server select a real server for the
109 * incoming connection, and create a connection entry.
111 *cpp = ip_vs_schedule(svc, skb);
/* No real server available: ip_vs_leave() decides the verdict. */
113 *verdict = ip_vs_leave(svc, skb, pp);
116 ip_vs_service_put(svc);
/*
 * Incrementally patch the UDP checksum after NAT changed one address and
 * one port: fold the old->new differences into the existing check field
 * instead of recomputing over the payload (RFC 1624 style update).
 * NOTE(review): elided excerpt — the af == AF_INET6 test, the #else/#endif
 * lines and the "if (!uhdr->check)" guard before CSUM_MANGLED_0 are not
 * visible.
 */
123 udp_fast_csum_update(int af, struct udphdr *uhdr,
124 const union nf_inet_addr *oldip,
125 const union nf_inet_addr *newip,
126 __be16 oldport, __be16 newport)
128 #ifdef CONFIG_IP_VS_IPV6
/* IPv6: 16-byte address diff + 2-byte port diff folded into check. */
131 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
132 ip_vs_check_diff2(oldport, newport,
133 ~csum_unfold(uhdr->check))));
/* IPv4: 4-byte address diff + 2-byte port diff folded into check. */
137 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
138 ip_vs_check_diff2(oldport, newport,
139 ~csum_unfold(uhdr->check))));
/* A computed 0 must be sent as 0xFFFF: 0 means "no checksum" for UDP. */
141 uhdr->check = CSUM_MANGLED_0;
/*
 * SNAT mangling for reply packets (real server -> client): rewrite the
 * source port to the virtual port and fix up the UDP checksum, either
 * incrementally (fast path) or by full recomputation when an application
 * helper rewrote the payload.
 * NOTE(review): elided excerpt — the udphdr *udph declaration, several
 * return/brace lines and the #else/#endif around the IPv6 offset are not
 * visible.
 */
145 udp_snat_handler(struct sk_buff *skb,
146 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
149 unsigned int udphoff;
151 #ifdef CONFIG_IP_VS_IPV6
152 if (cp->af == AF_INET6)
153 udphoff = sizeof(struct ipv6hdr);
156 udphoff = ip_hdrlen(skb);
158 /* csum_check requires unshared skb */
159 if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
162 if (unlikely(cp->app != NULL)) {
163 /* Some checks before mangling */
164 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
168 * Call application helper if needed
170 if (!ip_vs_app_pkt_out(cp, skb))
174 udph = (void *)skb_network_header(skb) + udphoff;
/* SNAT: replies leave with the virtual service's port as source. */
175 udph->source = cp->vport;
178 * Adjust UDP checksums
/* Fast path: no app helper and checksum in use — incremental update. */
180 if (!cp->app && (udph->check != 0)) {
181 /* Only port and addr are changed, do fast csum update */
182 udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
183 cp->dport, cp->vport);
184 if (skb->ip_summed == CHECKSUM_COMPLETE)
185 skb->ip_summed = CHECKSUM_NONE;
187 /* full checksum calculation */
189 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
190 #ifdef CONFIG_IP_VS_IPV6
191 if (cp->af == AF_INET6)
192 udph->check = csum_ipv6_magic(&cp->vaddr.in6,
195 cp->protocol, skb->csum);
198 udph->check = csum_tcpudp_magic(cp->vaddr.ip,
/* 0 result must be transmitted as all-ones (UDP "no checksum" is 0). */
203 if (udph->check == 0)
204 udph->check = CSUM_MANGLED_0;
205 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
206 pp->name, udph->check,
207 (char*)&(udph->check) - (char*)udph);
/*
 * DNAT mangling for request packets (client -> real server): rewrite the
 * destination port to the real server's port and fix the UDP checksum,
 * mirroring udp_snat_handler() with the address/port roles swapped.
 * NOTE(review): elided excerpt — the udphdr *udph declaration, several
 * return/brace lines and the #else/#endif pairs are not visible.
 */
214 udp_dnat_handler(struct sk_buff *skb,
215 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
218 unsigned int udphoff;
220 #ifdef CONFIG_IP_VS_IPV6
221 if (cp->af == AF_INET6)
222 udphoff = sizeof(struct ipv6hdr);
225 udphoff = ip_hdrlen(skb);
227 /* csum_check requires unshared skb */
228 if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
231 if (unlikely(cp->app != NULL)) {
232 /* Some checks before mangling */
233 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
237 * Attempt ip_vs_app call.
238 * It will fix ip_vs_conn
240 if (!ip_vs_app_pkt_in(cp, skb))
244 udph = (void *)skb_network_header(skb) + udphoff;
/* DNAT: requests are steered to the chosen real server's port. */
245 udph->dest = cp->dport;
248 * Adjust UDP checksums
/* Fast path: no app helper and checksum in use — incremental update. */
250 if (!cp->app && (udph->check != 0)) {
251 /* Only port and addr are changed, do fast csum update */
252 udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
253 cp->vport, cp->dport);
254 if (skb->ip_summed == CHECKSUM_COMPLETE)
255 skb->ip_summed = CHECKSUM_NONE;
257 /* full checksum calculation */
259 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
260 #ifdef CONFIG_IP_VS_IPV6
261 if (cp->af == AF_INET6)
262 udph->check = csum_ipv6_magic(&cp->caddr.in6,
265 cp->protocol, skb->csum);
268 udph->check = csum_tcpudp_magic(cp->caddr.ip,
273 if (udph->check == 0)
274 udph->check = CSUM_MANGLED_0;
/* We just computed a valid checksum; no need to verify it again later. */
275 skb->ip_summed = CHECKSUM_UNNECESSARY;
/*
 * Verify the UDP checksum of a packet before mangling it.  A zero check
 * field means the sender disabled UDP checksumming, so verification is
 * skipped entirely.
 * NOTE(review): elided excerpt — the switch's CHECKSUM_NONE/default case
 * labels, the af test before the #ifdef, the uh NULL check and the return
 * statements are not visible; the fallthrough noted below is inferred
 * from the visible case ordering — confirm against the full source.
 */
282 udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
284 struct udphdr _udph, *uh;
285 unsigned int udphoff;
287 #ifdef CONFIG_IP_VS_IPV6
289 udphoff = sizeof(struct ipv6hdr);
292 udphoff = ip_hdrlen(skb);
294 uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
/* check == 0: checksum not used by sender; nothing to verify. */
298 if (uh->check != 0) {
299 switch (skb->ip_summed) {
/* Compute the checksum over the UDP header + payload ... */
301 skb->csum = skb_checksum(skb, udphoff,
302 skb->len - udphoff, 0);
/* ... then fall through to validate it with the pseudo-header. */
303 case CHECKSUM_COMPLETE:
304 #ifdef CONFIG_IP_VS_IPV6
305 if (af == AF_INET6) {
306 if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
307 &ipv6_hdr(skb)->daddr,
309 ipv6_hdr(skb)->nexthdr,
311 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
312 "Failed checksum for");
317 if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
320 ip_hdr(skb)->protocol,
322 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
323 "Failed checksum for");
328 /* No need to checksum. */
337 * Note: the caller guarantees that only one of register_app,
338 * unregister_app or app_conn_bind is called each time.
/*
 * Hash table of registered UDP application helpers (e.g. protocol
 * modules that rewrite payloads), keyed by port via udp_app_hashkey().
 * udp_app_lock serializes register/unregister against lookups.
 */
341 #define UDP_APP_TAB_BITS 4
342 #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
343 #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
345 static struct list_head udp_apps[UDP_APP_TAB_SIZE];
346 static DEFINE_SPINLOCK(udp_app_lock);
/*
 * Hash a UDP port into the udp_apps table: XOR-fold the high bits onto
 * the low bits to mix both bytes of the port.
 * NOTE(review): the trailing "& UDP_APP_TAB_MASK" of the return
 * expression is on an elided line.
 */
348 static inline __u16 udp_app_hashkey(__be16 port)
350 return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
/*
 * Register a UDP application helper incarnation on its port.  Fails if
 * another helper already owns that port in the hash bucket.
 * NOTE(review): elided excerpt — the duplicate-port error path (likely
 * setting -EEXIST and jumping out) and the return statement are not
 * visible.
 */
355 static int udp_register_app(struct ip_vs_app *inc)
359 __be16 port = inc->port;
362 hash = udp_app_hashkey(port);
365 spin_lock_bh(&udp_app_lock);
/* Reject duplicate registrations for the same port. */
366 list_for_each_entry(i, &udp_apps[hash], p_list) {
367 if (i->port == port) {
372 list_add(&inc->p_list, &udp_apps[hash]);
/* appcnt gates the per-packet app lookup in the fast path. */
373 atomic_inc(&ip_vs_protocol_udp.appcnt);
376 spin_unlock_bh(&udp_app_lock);
/*
 * Unregister a UDP application helper: drop the app counter and unlink
 * it from its hash bucket, under the same lock as registration.
 */
382 udp_unregister_app(struct ip_vs_app *inc)
384 spin_lock_bh(&udp_app_lock);
385 atomic_dec(&ip_vs_protocol_udp.appcnt);
386 list_del(&inc->p_list);
387 spin_unlock_bh(&udp_app_lock);
/*
 * Bind a freshly created connection to the application helper registered
 * on its virtual port, if any.  Only NAT-forwarded connections can use
 * helpers, since only NAT lets IPVS mangle the payload.
 * NOTE(review): elided excerpt — the hash/result declarations, the
 * "goto out" on failed inc_get, the cp->app assignment and the return
 * are not visible.
 */
391 static int udp_app_conn_bind(struct ip_vs_conn *cp)
394 struct ip_vs_app *inc;
397 /* Default binding: bind app only for NAT */
398 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
401 /* Lookup application incarnations and bind the right one */
402 hash = udp_app_hashkey(cp->vport);
404 spin_lock(&udp_app_lock);
405 list_for_each_entry(inc, &udp_apps[hash], p_list) {
406 if (inc->port == cp->vport) {
/* Take a reference on the incarnation before dropping the lock. */
407 if (unlikely(!ip_vs_app_inc_get(inc)))
409 spin_unlock(&udp_app_lock);
411 IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->"
412 "%u.%u.%u.%u:%u to app %s on port %u\n",
414 NIPQUAD(cp->caddr.ip), ntohs(cp->cport),
415 NIPQUAD(cp->vaddr.ip), ntohs(cp->vport),
416 inc->name, ntohs(inc->port));
/* Let the helper set up its per-connection state. */
419 result = inc->init_conn(inc, cp);
423 spin_unlock(&udp_app_lock);
/*
 * Per-state connection timeouts (in jiffies): 5 minutes for an
 * established UDP flow; IP_VS_UDP_S_LAST is a catch-all sentinel.
 */
430 static int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
431 [IP_VS_UDP_S_NORMAL] = 5*60*HZ,
432 [IP_VS_UDP_S_LAST] = 2*HZ,
/*
 * Human-readable names for the UDP connection states, indexed by state;
 * the sentinel slot is named "BUG!" because it should never be reached.
 */
435 static char * udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
436 [IP_VS_UDP_S_NORMAL] = "UDP",
437 [IP_VS_UDP_S_LAST] = "BUG!",
/*
 * Set the timeout for the state named sname (looked up in
 * udp_state_name_table) to "to"; delegates to the generic helper.
 */
442 udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
444 return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST,
445 udp_state_name_table, sname, to);
/*
 * Map a state index to its display name; out-of-range or unnamed states
 * get a placeholder instead of an out-of-bounds read.
 * NOTE(review): the out-of-range return (between the if and the table
 * lookup) is on an elided line.
 */
448 static const char * udp_state_name(int state)
450 if (state >= IP_VS_UDP_S_LAST)
452 return udp_state_name_table[state] ? udp_state_name_table[state] : "?";
/*
 * UDP is stateless, so every packet in either direction simply refreshes
 * the connection's timeout to the NORMAL-state value.
 */
456 udp_state_transition(struct ip_vs_conn *cp, int direction,
457 const struct sk_buff *skb,
458 struct ip_vs_protocol *pp)
460 cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL];
/*
 * Protocol init hook: set up the empty app hash buckets and install the
 * default timeout table.
 */
464 static void udp_init(struct ip_vs_protocol *pp)
466 IP_VS_INIT_HASH_TABLE(udp_apps);
467 pp->timeout_table = udp_timeouts;
470 static void udp_exit(struct ip_vs_protocol *pp)
/*
 * IPVS protocol descriptor for UDP: wires the handlers above into the
 * generic IPVS framework.
 * NOTE(review): elided excerpt — the .name/.dont_defrag/.init/.exit
 * members and the closing brace are not visible.
 */
475 struct ip_vs_protocol ip_vs_protocol_udp = {
477 .protocol = IPPROTO_UDP,
478 .num_states = IP_VS_UDP_S_LAST,
482 .conn_schedule = udp_conn_schedule,
483 .conn_in_get = udp_conn_in_get,
484 .conn_out_get = udp_conn_out_get,
485 .snat_handler = udp_snat_handler,
486 .dnat_handler = udp_dnat_handler,
487 .csum_check = udp_csum_check,
488 .state_transition = udp_state_transition,
489 .state_name = udp_state_name,
490 .register_app = udp_register_app,
491 .unregister_app = udp_unregister_app,
492 .app_conn_bind = udp_app_conn_bind,
493 .debug_packet = ip_vs_tcpudp_debug_packet,
494 .timeout_change = NULL,
495 .set_state_timeout = udp_set_state_timeout,