[DCCP]: Move dccp_[un]hash from ipv4.c to the core
[safe/jmp/linux-2.6] / net / dccp / ipv4.c
1 /*
2  *  net/dccp/ipv4.c
3  *
4  *  An implementation of the DCCP protocol
5  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6  *
7  *      This program is free software; you can redistribute it and/or
8  *      modify it under the terms of the GNU General Public License
9  *      as published by the Free Software Foundation; either version
10  *      2 of the License, or (at your option) any later version.
11  */
12
13 #include <linux/config.h>
14 #include <linux/dccp.h>
15 #include <linux/icmp.h>
16 #include <linux/module.h>
17 #include <linux/skbuff.h>
18 #include <linux/random.h>
19
20 #include <net/icmp.h>
21 #include <net/inet_hashtables.h>
22 #include <net/inet_sock.h>
23 #include <net/sock.h>
24 #include <net/timewait_sock.h>
25 #include <net/tcp_states.h>
26 #include <net/xfrm.h>
27
28 #include "ackvec.h"
29 #include "ccid.h"
30 #include "dccp.h"
31 #include "feat.h"
32
/*
 * Hash tables used to look up DCCP sockets.  Only the listening-hash
 * lock, user count and wait queue can be initialized statically here;
 * the remaining members (ehash/bhash tables) are presumably set up at
 * protocol init time — not visible in this file chunk.
 */
struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
	.lhash_lock     = RW_LOCK_UNLOCKED,
	.lhash_users    = ATOMIC_INIT(0),
	.lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);
40
41 static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
42 {
43         return inet_csk_get_port(&dccp_hashinfo, sk, snum,
44                                  inet_csk_bind_conflict);
45 }
46
/*
 * dccp_v4_connect - set up and start an active (client) DCCP connection
 * @sk:       socket to connect; its role becomes DCCP_ROLE_CLIENT
 * @uaddr:    destination address, must be AF_INET
 * @addr_len: length of @uaddr, must cover a struct sockaddr_in
 *
 * Routes to the destination, binds a local port and hashes the socket,
 * chooses the initial sequence number and transmits the DCCP-Request.
 * Returns 0 on success or a negative errno; on failure the socket is
 * returned to DCCP_CLOSED and its local port is released.
 */
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	/* A service code must have been set before connecting. */
	if (dccp_service_not_initialized(sk))
		return -EPROTO;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	/* With a strict source route the first hop, not the final
	 * destination, is what the route lookup must resolve. */
	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt != NULL && inet->opt->srr) {
		if (daddr == 0)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_DCCP,
			       inet->sport, usin->sin_port, sk);
	if (tmp < 0)
		return tmp;

	/* Connected DCCP sockets are unicast only. */
	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (inet->opt == NULL || !inet->opt->srr)
		daddr = rt->rt_dst;

	/* Let the route pick our source address if none was bound. */
	if (inet->saddr == 0)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt != NULL)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
	/*
	 * Socket identity is still unknown (sport may be zero).
	 * However we set state to DCCP_REQUESTING and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet_hash_connect(&dccp_death_row, sk);
	if (err != 0)
		goto failure;

	/* Re-resolve the route now that the source port is known. */
	err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport,
				sk);
	if (err != 0)
		goto failure;

	/* OK, now commit destination to socket.  */
	sk_setup_caps(sk, &rt->u.dst);

	/* Initial sequence number; GAR starts out equal to ISS. */
	dp->dccps_gar =
		dp->dccps_iss = secure_dccp_sequence_number(inet->saddr,
							    inet->daddr,
							    inet->sport,
							    usin->sin_port);
	dccp_update_gss(sk, dp->dccps_iss);

	inet->id = dp->dccps_iss ^ jiffies;

	err = dccp_connect(sk);
	rt = NULL;	/* route reference now owned by the socket */
	if (err != 0)
		goto failure;
out:
	return err;
failure:
	/*
	 * This unhashes the socket and releases the local port, if necessary.
	 */
	dccp_set_state(sk, DCCP_CLOSED);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	goto out;
}

EXPORT_SYMBOL_GPL(dccp_v4_connect);
146
/*
 * dccp_do_pmtu_discovery - react to an ICMP_FRAG_NEEDED message
 * @sk:  the affected DCCP socket
 * @iph: IP header of the offending packet (currently unused here)
 * @mtu: next-hop MTU reported by the ICMP message
 *
 * Path MTU discovery as defined in RFC 1191: lower the cached route
 * MTU and shrink the packet size accordingly.  Called from
 * dccp_v4_err() only when the socket is not owned by user context.
 */
static inline void dccp_do_pmtu_discovery(struct sock *sk,
					  const struct iphdr *iph,
					  u32 mtu)
{
	struct dst_entry *dst;
	const struct inet_sock *inet = inet_sk(sk);
	const struct dccp_sock *dp = dccp_sk(sk);

	/* We are not interested in DCCP_LISTEN and request_socks (RESPONSEs
	 * send out by Linux are always < 576bytes so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet_to_big packets
	 * are send back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	/* Re-read: update_pmtu() may have clamped the value. */
	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		dccp_sync_mss(sk, mtu);

		/*
		 * From: draft-ietf-dccp-spec-11.txt
		 *
		 *	DCCP-Sync packets are the best choice for upward
		 *	probing, since DCCP-Sync probes do not risk application
		 *	data loss.
		 */
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
	} /* else let the usual retransmit timer handle it */
}
198
199 static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb)
200 {
201         int err;
202         struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
203         const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
204                                      sizeof(struct dccp_hdr_ext) +
205                                      sizeof(struct dccp_hdr_ack_bits);
206         struct sk_buff *skb;
207
208         if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
209                 return;
210
211         skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);
212         if (skb == NULL)
213                 return;
214
215         /* Reserve space for headers. */
216         skb_reserve(skb, MAX_DCCP_HEADER);
217
218         skb->dst = dst_clone(rxskb->dst);
219
220         skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
221         dh = dccp_hdr(skb);
222         memset(dh, 0, dccp_hdr_ack_len);
223
224         /* Build DCCP header and checksum it. */
225         dh->dccph_type     = DCCP_PKT_ACK;
226         dh->dccph_sport    = rxdh->dccph_dport;
227         dh->dccph_dport    = rxdh->dccph_sport;
228         dh->dccph_doff     = dccp_hdr_ack_len / 4;
229         dh->dccph_x        = 1;
230
231         dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
232         dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
233                          DCCP_SKB_CB(rxskb)->dccpd_seq);
234
235         bh_lock_sock(dccp_ctl_socket->sk);
236         err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk,
237                                     rxskb->nh.iph->daddr,
238                                     rxskb->nh.iph->saddr, NULL);
239         bh_unlock_sock(dccp_ctl_socket->sk);
240
241         if (err == NET_XMIT_CN || err == 0) {
242                 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
243                 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
244         }
245 }
246
/* Acknowledge a connection attempt; @req itself is not needed — the
 * reply is built entirely from the incoming skb. */
static void dccp_v4_reqsk_send_ack(struct sk_buff *skb,
				   struct request_sock *req)
{
	dccp_v4_ctl_send_ack(skb);
}
252
253 static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
254                                  struct dst_entry *dst)
255 {
256         int err = -1;
257         struct sk_buff *skb;
258
259         /* First, grab a route. */
260         
261         if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
262                 goto out;
263
264         skb = dccp_make_response(sk, dst, req);
265         if (skb != NULL) {
266                 const struct inet_request_sock *ireq = inet_rsk(req);
267
268                 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
269                 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
270                                             ireq->rmt_addr,
271                                             ireq->opt);
272                 if (err == NET_XMIT_CN)
273                         err = 0;
274         }
275
276 out:
277         dst_release(dst);
278         return err;
279 }
280
/*
 * This routine is called by the ICMP module when it gets some sort of error
 * condition. If err < 0 then the socket should be closed and the error
 * returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
 * After adjustment header points to the first 8 bytes of the tcp header. We
 * need to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When someone else
 * accesses the socket the ICMP is just dropped and for some paths there is no
 * check at all. A more general error queue to queue errors for later handling
 * is probably better.
 */
void dccp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data +
							(iph->ihl << 2));
	struct dccp_sock *dp;
	struct inet_sock *inet;
	const int type = skb->h.icmph->type;
	const int code = skb->h.icmph->code;
	struct sock *sk;
	__u64 seq;
	int err;

	/* Need the IP header plus at least 8 bytes of the offending DCCP
	 * header to identify the connection. */
	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(&dccp_hashinfo, iph->daddr, dh->dccph_dport,
			 iph->saddr, dh->dccph_sport, inet_iif(skb));
	if (sk == NULL) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		/* Drop the timewait reference taken by the lookup. */
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	/* Ignore ICMPs quoting a sequence number outside our send window. */
	dp = dccp_sk(sk);
	seq = dccp_hdr_seq(skb);
	if (sk->sk_state != DCCP_LISTEN &&
	    !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
		/* NOTE(review): non-_BH NET_INC_STATS here is inconsistent
		 * with the _BH variants used elsewhere in this handler —
		 * confirm whether that is intentional. */
		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	/* Map the ICMP type/code to an errno value (or ignore it). */
	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				dccp_do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req , **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;
		req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		BUG_TRAP(!req->sk);

		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}
		/*
		 * Still in RESPOND, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND:
		/* Handshake in progress: a hard error aborts the attempt. */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;

			sk->sk_error_report(sk);

			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even this two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else /* Only an error on timeout */
		sk->sk_err_soft = err;
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
436
437 /* This routine computes an IPv4 DCCP checksum. */
438 void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
439 {
440         const struct inet_sock *inet = inet_sk(sk);
441         struct dccp_hdr *dh = dccp_hdr(skb);
442
443         dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr, inet->daddr);
444 }
445
446 EXPORT_SYMBOL_GPL(dccp_v4_send_check);
447
448 static inline u64 dccp_v4_init_sequence(const struct sock *sk,
449                                         const struct sk_buff *skb)
450 {
451         return secure_dccp_sequence_number(skb->nh.iph->daddr,
452                                            skb->nh.iph->saddr,
453                                            dccp_hdr(skb)->dccph_dport,
454                                            dccp_hdr(skb)->dccph_sport);
455 }
456
457 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
458 {
459         struct inet_request_sock *ireq;
460         struct dccp_sock dp;
461         struct request_sock *req;
462         struct dccp_request_sock *dreq;
463         const __be32 saddr = skb->nh.iph->saddr;
464         const __be32 daddr = skb->nh.iph->daddr;
465         const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
466         struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
467         __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
468
469         /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */
470         if (((struct rtable *)skb->dst)->rt_flags &
471             (RTCF_BROADCAST | RTCF_MULTICAST)) {
472                 reset_code = DCCP_RESET_CODE_NO_CONNECTION;
473                 goto drop;
474         }
475
476         if (dccp_bad_service_code(sk, service)) {
477                 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
478                 goto drop;
479         }
480         /*
481          * TW buckets are converted to open requests without
482          * limitations, they conserve resources and peer is
483          * evidently real one.
484          */
485         if (inet_csk_reqsk_queue_is_full(sk))
486                 goto drop;
487
488         /*
489          * Accept backlog is full. If we have already queued enough
490          * of warm entries in syn queue, drop request. It is better than
491          * clogging syn queue with openreqs with exponentially increasing
492          * timeout.
493          */
494         if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
495                 goto drop;
496
497         req = reqsk_alloc(sk->sk_prot->rsk_prot);
498         if (req == NULL)
499                 goto drop;
500
501         if (dccp_parse_options(sk, skb))
502                 goto drop;
503
504         dccp_openreq_init(req, &dp, skb);
505
506         ireq = inet_rsk(req);
507         ireq->loc_addr = daddr;
508         ireq->rmt_addr = saddr;
509         req->rcv_wnd    = 100; /* Fake, option parsing will get the
510                                   right value */
511         ireq->opt       = NULL;
512
513         /* 
514          * Step 3: Process LISTEN state
515          *
516          * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
517          *
518          * In fact we defer setting S.GSR, S.SWL, S.SWH to
519          * dccp_create_openreq_child.
520          */
521         dreq = dccp_rsk(req);
522         dreq->dreq_isr     = dcb->dccpd_seq;
523         dreq->dreq_iss     = dccp_v4_init_sequence(sk, skb);
524         dreq->dreq_service = service;
525
526         if (dccp_v4_send_response(sk, req, NULL))
527                 goto drop_and_free;
528
529         inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
530         return 0;
531
532 drop_and_free:
533         reqsk_free(req);
534 drop:
535         DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
536         dcb->dccpd_reset_code = reset_code;
537         return -1;
538 }
539
540 EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
541
/*
 * The three way handshake has completed - we got a valid ACK or DATAACK -
 * now create the new socket.
 *
 * This is the equivalent of TCP's tcp_v4_syn_recv_sock
 *
 * Returns the new child socket, or NULL if the accept queue is full or
 * no route/child could be obtained (LISTENOVERFLOWS / LISTENDROPS are
 * counted accordingly).
 */
struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
				       struct request_sock *req,
				       struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct dccp_sock *newdp;
	struct sock *newsk;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto exit;

	sk_setup_caps(newsk, dst);

	/* Copy the addressing info from the request into the new socket. */
	newdp		   = dccp_sk(newsk);
	newinet		   = inet_sk(newsk);
	ireq		   = inet_rsk(req);
	newinet->daddr	   = ireq->rmt_addr;
	newinet->rcv_saddr = ireq->loc_addr;
	newinet->saddr	   = ireq->loc_addr;
	newinet->opt	   = ireq->opt;
	/* Ownership of the IP options transferred to newinet above. */
	ireq->opt	   = NULL;
	newinet->mc_index  = inet_iif(skb);
	newinet->mc_ttl	   = skb->nh.iph->ttl;
	newinet->id	   = jiffies;

	dccp_sync_mss(newsk, dst_mtu(dst));

	/* Insert the child into the established hash and inherit the
	 * parent's bound local port. */
	__inet_hash(&dccp_hashinfo, newsk, 0);
	__inet_inherit_port(&dccp_hashinfo, sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
597
598 static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
599 {
600         const struct dccp_hdr *dh = dccp_hdr(skb);
601         const struct iphdr *iph = skb->nh.iph;
602         struct sock *nsk;
603         struct request_sock **prev;
604         /* Find possible connection requests. */
605         struct request_sock *req = inet_csk_search_req(sk, &prev,
606                                                        dh->dccph_sport,
607                                                        iph->saddr, iph->daddr);
608         if (req != NULL)
609                 return dccp_check_req(sk, skb, req, prev);
610
611         nsk = __inet_lookup_established(&dccp_hashinfo,
612                                         iph->saddr, dh->dccph_sport,
613                                         iph->daddr, ntohs(dh->dccph_dport),
614                                         inet_iif(skb));
615         if (nsk != NULL) {
616                 if (nsk->sk_state != DCCP_TIME_WAIT) {
617                         bh_lock_sock(nsk);
618                         return nsk;
619                 }
620                 inet_twsk_put((struct inet_timewait_sock *)nsk);
621                 return NULL;
622         }
623
624         return sk;
625 }
626
627 int dccp_v4_checksum(const struct sk_buff *skb, const __be32 saddr,
628                      const __be32 daddr)
629 {
630         const struct dccp_hdr* dh = dccp_hdr(skb);
631         int checksum_len;
632         u32 tmp;
633
634         if (dh->dccph_cscov == 0)
635                 checksum_len = skb->len;
636         else {
637                 checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
638                 checksum_len = checksum_len < skb->len ? checksum_len :
639                                                          skb->len;
640         }
641
642         tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
643         return csum_tcpudp_magic(saddr, daddr, checksum_len,
644                                  IPPROTO_DCCP, tmp);
645 }
646
647 static int dccp_v4_verify_checksum(struct sk_buff *skb,
648                                    const __be32 saddr, const __be32 daddr)
649 {
650         struct dccp_hdr *dh = dccp_hdr(skb);
651         int checksum_len;
652         u32 tmp;
653
654         if (dh->dccph_cscov == 0)
655                 checksum_len = skb->len;
656         else {
657                 checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
658                 checksum_len = checksum_len < skb->len ? checksum_len :
659                                                          skb->len;
660         }
661         tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
662         return csum_tcpudp_magic(saddr, daddr, checksum_len,
663                                  IPPROTO_DCCP, tmp) == 0 ? 0 : -1;
664 }
665
666 static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
667                                            struct sk_buff *skb)
668 {
669         struct rtable *rt;
670         struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif,
671                             .nl_u = { .ip4_u =
672                                       { .daddr = skb->nh.iph->saddr,
673                                         .saddr = skb->nh.iph->daddr,
674                                         .tos = RT_CONN_FLAGS(sk) } },
675                             .proto = sk->sk_protocol,
676                             .uli_u = { .ports =
677                                        { .sport = dccp_hdr(skb)->dccph_dport,
678                                          .dport = dccp_hdr(skb)->dccph_sport }
679                                      }
680                           };
681
682         if (ip_route_output_flow(&rt, &fl, sk, 0)) {
683                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
684                 return NULL;
685         }
686
687         return &rt->u.dst;
688 }
689
/*
 * dccp_v4_ctl_send_reset - send a DCCP-Reset in reply to @rxskb
 *
 * Used for packets that have no usable socket attached; the Reset is
 * built and transmitted on the shared dccp_ctl_socket.  The reset code
 * is taken from DCCP_SKB_CB(rxskb)->dccpd_reset_code.
 */
static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
{
	int err;
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb;
	struct dst_entry *dst;
	u64 seqno;

	/* Never send a reset in response to a reset. */
	if (rxdh->dccph_type == DCCP_PKT_RESET)
		return;

	/* Only reply to packets that were addressed to this host. */
	if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
		return;

	dst = dccp_v4_route_skb(dccp_ctl_socket->sk, rxskb);
	if (dst == NULL)
		return;

	skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_DCCP_HEADER);
	skb->dst = dst_clone(dst);

	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_hdr_reset_len);

	/* Build DCCP header and checksum it. */
	dh->dccph_type	   = DCCP_PKT_RESET;
	dh->dccph_sport	   = rxdh->dccph_dport;
	dh->dccph_dport	   = rxdh->dccph_sport;
	dh->dccph_doff	   = dccp_hdr_reset_len / 4;
	dh->dccph_x	   = 1;
	dccp_hdr_reset(skb)->dccph_reset_code =
				DCCP_SKB_CB(rxskb)->dccpd_reset_code;

	/* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
	seqno = 0;
	if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);

	dccp_hdr_set_seq(dh, seqno);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr,
					      rxskb->nh.iph->daddr);

	bh_lock_sock(dccp_ctl_socket->sk);
	err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk,
				    rxskb->nh.iph->daddr,
				    rxskb->nh.iph->saddr, NULL);
	bh_unlock_sock(dccp_ctl_socket->sk);

	if (err == NET_XMIT_CN || err == 0) {
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
	}
out:
	dst_release(dst);
}
758
/*
 * dccp_v4_do_rcv - per-socket receive processing
 *
 * Fast path for DCCP_OPEN sockets, otherwise runs the connection state
 * machine, possibly handing the packet to a pending/established child
 * of a listening socket.  Always returns 0: on a state-machine error a
 * Reset is sent and the packet is consumed here.
 *
 * NOTE(review): presumably invoked with the socket locked (the usual
 * *_do_rcv convention) — confirm at the call sites.
 */
int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_hdr *dh = dccp_hdr(skb);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dh, skb->len))
			goto reset;
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *        If P.type == Request or P contains a valid Init Cookie
	 *              option,
	 *           * Must scan the packet's options to check for an Init
	 *              Cookie.  Only the Init Cookie is processed here,
	 *              however; other options are processed in Step 8.  This
	 *              scan need only be performed if the endpoint uses Init
	 *              Cookies *
	 *           * Generate a new socket and switch to that socket *
	 *           Set S := new socket for this port pair
	 *           S.state = RESPOND
	 *           Choose S.ISS (initial seqno) or set from Init Cookie
	 *           Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *           Continue with S.state == RESPOND
	 *           * A Response packet will be generated in Step 11 *
	 *        Otherwise,
	 *           Generate Reset(No Connection) unless P.type == Reset
	 *           Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *       dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v4_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;

		/* Packet belongs to a child socket: process it there. */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dh, skb->len))
		goto reset;
	return 0;

reset:
	dccp_v4_ctl_send_reset(skb);
discard:
	kfree_skb(skb);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
818
/*
 * dccp_invalid_packet - "Step 1: Check header basics"
 *
 * Returns 1 if @skb must be dropped (not addressed to this host,
 * truncated header, unknown packet type, bad Data Offset, or short
 * sequence numbers on a type that requires long ones), 0 otherwise.
 * Note the pskb_may_pull() ordering: each header field is only read
 * after the bytes containing it are known to be present.
 */
int dccp_invalid_packet(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;

	if (skb->pkt_type != PACKET_HOST)
		return 1;

	if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: pskb_may_pull failed\n");
		return 1;
	}

	dh = dccp_hdr(skb);

	/* If the packet type is not understood, drop packet and return */
	if (dh->dccph_type >= DCCP_PKT_INVALID) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: invalid packet type\n");
		return 1;
	}

	/*
	 * If P.Data Offset is too small for packet type, or too large for
	 * packet, drop packet and return
	 */
	if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "
					    "too small 1\n",
			       dh->dccph_doff);
		return 1;
	}

	if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "
					    "too small 2\n",
			       dh->dccph_doff);
		return 1;
	}

	/* pskb_may_pull() may have reallocated the header: re-read it. */
	dh = dccp_hdr(skb);

	/*
	 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
	 * has short sequence numbers), drop packet and return
	 */
	if (dh->dccph_x == 0 &&
	    dh->dccph_type != DCCP_PKT_DATA &&
	    dh->dccph_type != DCCP_PKT_ACK &&
	    dh->dccph_type != DCCP_PKT_DATAACK) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.type (%s) not Data, Ack "
					    "nor DataAck and P.X == 0\n",
			       dccp_packet_name(dh->dccph_type));
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(dccp_invalid_packet);
877
/*
 * dccp_v4_rcv  -  main receive entry point for DCCP over IPv4
 *
 * Performs Step 1 (header validation + checksum) and Step 2 (socket
 * lookup) of the receive processing, then hands the packet to the owning
 * socket.  When no matching connection exists (or the socket is in
 * TIMEWAIT), a Reset(No Connection) is generated unless the packet is
 * itself a Reset.
 *
 * Always returns 0: the packet is either queued to a socket or freed here.
 */
int dccp_v4_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;

	/* Step 1: Check header basics: */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* If the header checksum is incorrect, drop packet and return */
	if (dccp_v4_verify_checksum(skb, skb->nh.iph->saddr,
				    skb->nh.iph->daddr) < 0) {
		LIMIT_NETDEBUG(KERN_WARNING "%s: incorrect header checksum\n",
			       __FUNCTION__);
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	/* Cache sequence number and packet type in the skb control block
	 * for the state machine. */
	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(skb);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	dccp_pr_debug("%8.8s "
		      "src=%u.%u.%u.%u@%-5d "
		      "dst=%u.%u.%u.%u@%-5d seq=%llu",
		      dccp_packet_name(dh->dccph_type),
		      NIPQUAD(skb->nh.iph->saddr), ntohs(dh->dccph_sport),
		      NIPQUAD(skb->nh.iph->daddr), ntohs(dh->dccph_dport),
		      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);

	/* Packets without an Ack field get the sentinel ack sequence. */
	if (dccp_packet_without_ack(skb)) {
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
		dccp_pr_debug_cat("\n");
	} else {
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
		dccp_pr_debug_cat(", ack=%llu\n",
				  (unsigned long long)
				  DCCP_SKB_CB(skb)->dccpd_ack_seq);
	}

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet_lookup(&dccp_hashinfo,
			   skb->nh.iph->saddr, dh->dccph_sport,
			   skb->nh.iph->daddr, ntohs(dh->dccph_dport),
			   inet_iif(skb));

	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */

	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: "
			      "do_time_wait\n");
		goto do_time_wait;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	/* Deliver to the socket; sk_receive_skb consumes the skb and
	 * drops the socket reference taken by the lookup. */
	return sk_receive_skb(sk, skb);

no_dccp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v4_ctl_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	/* Drop the reference on sk taken by the lookup, then discard. */
	sock_put(sk);
	goto discard_it;

do_time_wait:
	/*
	 * Release the timewait socket reference and fall through to the
	 * "no connection" handling: a Reset is generated unless the
	 * incoming packet is itself a Reset.
	 */
	inet_twsk_put((struct inet_timewait_sock *)sk);
	goto no_dccp_socket;
}
985
/*
 * IPv4-specific operations plugged into the generic connection-socket
 * layer (inet_connection_sock) for DCCP sockets; installed on each socket
 * by dccp_v4_init_sock().
 */
struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
	.queue_xmit	= ip_queue_xmit,		/* transmit via IPv4 */
	.send_check	= dccp_v4_send_check,		/* fill in checksum */
	.rebuild_header	= inet_sk_rebuild_header,
	.conn_request	= dccp_v4_conn_request,		/* passive open */
	.syn_recv_sock	= dccp_v4_request_recv_sock,	/* create child sock */
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ip_setsockopt,
	.getsockopt	= ip_getsockopt,
	.addr2sockaddr	= inet_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in),
};
998
999 static int dccp_v4_init_sock(struct sock *sk)
1000 {
1001         const int err = dccp_init_sock(sk);
1002
1003         if (err == 0)
1004                 inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
1005         return err;
1006 }
1007
/* Free the IP options (if any) attached to a request socket.
 * kfree(NULL) is a no-op, so no NULL check is needed. */
static void dccp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
1012
/*
 * Operations for embryonic (request) sockets created on passive open;
 * referenced by dccp_prot.rsk_prot below.
 */
static struct request_sock_ops dccp_request_sock_ops = {
	.family		= PF_INET,
	.obj_size	= sizeof(struct dccp_request_sock),
	.rtx_syn_ack	= dccp_v4_send_response,	/* (re)send Response */
	.send_ack	= dccp_v4_reqsk_send_ack,
	.destructor	= dccp_v4_reqsk_destructor,
	.send_reset	= dccp_v4_ctl_send_reset,
};
1021
/* Timewait socket parameters; DCCP uses the plain inet timewait socket. */
static struct timewait_sock_ops dccp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct inet_timewait_sock),
};
1025
/*
 * The DCCP/IPv4 protocol descriptor: maps the generic socket calls
 * (connect, sendmsg, ...) onto the DCCP implementation.  Shared helpers
 * live in the dccp core; the dccp_v4_* entries are IPv4-specific.
 */
struct proto dccp_prot = {
	.name			= "DCCP",
	.owner			= THIS_MODULE,
	.close			= dccp_close,
	.connect		= dccp_v4_connect,
	.disconnect		= dccp_disconnect,
	.ioctl			= dccp_ioctl,
	.init			= dccp_v4_init_sock,
	.setsockopt		= dccp_setsockopt,
	.getsockopt		= dccp_getsockopt,
	.sendmsg		= dccp_sendmsg,
	.recvmsg		= dccp_recvmsg,
	.backlog_rcv		= dccp_v4_do_rcv,	/* from sk backlog */
	.hash			= dccp_hash,
	.unhash			= dccp_unhash,
	.accept			= inet_csk_accept,
	.get_port		= dccp_v4_get_port,
	.shutdown		= dccp_shutdown,
	.destroy		= dccp_destroy_sock,
	.orphan_count		= &dccp_orphan_count,
	.max_header		= MAX_DCCP_HEADER,
	.obj_size		= sizeof(struct dccp_sock),
	.rsk_prot		= &dccp_request_sock_ops,
	.twsk_prot		= &dccp_timewait_sock_ops,
};
1051
1052 EXPORT_SYMBOL_GPL(dccp_prot);