[DCCP]: Just reflow the source code to fit in 80 columns
/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *      This program is free software; you can redistribute it and/or modify it
 *      under the terms of the GNU General Public License version 2 as
 *      published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

static struct net_protocol dccp_protocol = {
        .handler        = dccp_v4_rcv,
        .err_handler    = dccp_v4_err,
};

const char *dccp_packet_name(const int type)
{
        static const char *dccp_packet_names[] = {
                [DCCP_PKT_REQUEST]  = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA]     = "DATA",
                [DCCP_PKT_ACK]      = "ACK",
                [DCCP_PKT_DATAACK]  = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE]    = "CLOSE",
                [DCCP_PKT_RESET]    = "RESET",
                [DCCP_PKT_SYNC]     = "SYNC",
                [DCCP_PKT_SYNCACK]  = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)
                return "INVALID";
        else
                return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
        static char *dccp_state_names[] = {
                [DCCP_OPEN]       = "OPEN",
                [DCCP_REQUESTING] = "REQUESTING",
                [DCCP_PARTOPEN]   = "PARTOPEN",
                [DCCP_LISTEN]     = "LISTEN",
                [DCCP_RESPOND]    = "RESPOND",
                [DCCP_CLOSING]    = "CLOSING",
                [DCCP_TIME_WAIT]  = "TIME_WAIT",
                [DCCP_CLOSED]     = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";
        else
                return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

static inline int dccp_listen_start(struct sock *sk)
{
        dccp_sk(sk)->dccps_role = DCCP_ROLE_LISTEN;
        return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}

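/*
 * dccp_disconnect  -  abort the connection and return the socket to the
 * CLOSED state: purge the receive queue, drop any unsent packet, clear the
 * transmit timers and reset the address/port bookkeeping.
 */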
int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /* ABORT function of RFC793 */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
        /* FIXME: do the active reset thing */
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);

        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);
        __sk_dst_reset(sk);

        BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        dccp_pr_debug("entry\n");
        return -ENOIOCTLCMD;
}

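/*
 * Socket option handling: only IP-level options are passed through to
 * ip_setsockopt()/ip_getsockopt() for now; SOL_DCCP options are not yet
 * implemented and return -EOPNOTSUPP.
 */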
int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int optlen)
{
        dccp_pr_debug("entry\n");

        if (level != SOL_DCCP)
                return ip_setsockopt(sk, level, optname, optval, optlen);

        return -EOPNOTSUPP;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        dccp_pr_debug("entry\n");

        if (level != SOL_DCCP)
                return ip_getsockopt(sk, level, optname, optval, optlen);

        return -EOPNOTSUPP;
}

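/*
 * dccp_sendmsg  -  copy at most one packet worth of user data and hand it to
 * dccp_write_xmit().  Messages larger than the cached MSS are rejected with
 * -EMSGSIZE instead of being fragmented.
 */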
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int rc, size;
        long timeo;

        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN |
                                    DCCPF_CLOSING))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_release;

        size = sk->sk_prot->max_header + len;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
        if (skb == NULL)
                goto out_release;

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (rc != 0)
                goto out_discard;

        rc = dccp_write_xmit(sk, skb, len);
out_release:
        release_sock(sk);
        return rc ? : len;
out_discard:
        kfree_skb(skb);
        goto out_release;
}

EXPORT_SYMBOL(dccp_sendmsg);

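/*
 * dccp_recvmsg  -  walk the receive queue copying DCCP-Data/DataAck payloads
 * to userspace.  A queued Reset or Close packet is treated like a FIN and
 * terminates the read.  The MSG_OOB/urgent-data paths are still FIXMEs
 * inherited from the TCP code this was modelled on.
 */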
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len, int nonblock, int flags, int *addr_len)
{
        const struct dccp_hdr *dh;
        int copied = 0;
        unsigned long used;
        int err;
        int target;             /* Read at least this many bytes */
        long timeo;

        lock_sock(sk);

        err = -ENOTCONN;
        if (sk->sk_state == DCCP_LISTEN)
                goto out;

        timeo = sock_rcvtimeo(sk, nonblock);

        /* Urgent data needs to be handled specially. */
        if (flags & MSG_OOB)
                goto recv_urg;

        /* FIXME */
#if 0
        seq = &tp->copied_seq;
        if (flags & MSG_PEEK) {
                peek_seq = tp->copied_seq;
                seq = &peek_seq;
        }
#endif

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        do {
                struct sk_buff *skb;
                u32 offset;

        /* FIXME */
#if 0
                /*
                 * Are we at urgent data? Stop if we have read anything or
                 * have SIGURG pending.
                 */
                if (tp->urg_data && tp->urg_seq == *seq) {
                        if (copied)
                                break;
                        if (signal_pending(current)) {
                                copied = timeo ? sock_intr_errno(timeo) :
                                                 -EAGAIN;
                                break;
                        }
                }
#endif

                /* Next get a buffer. */

                skb = skb_peek(&sk->sk_receive_queue);
                do {
                        if (!skb)
                                break;

                        offset = 0;
                        dh = dccp_hdr(skb);

                        if (dh->dccph_type == DCCP_PKT_DATA ||
                            dh->dccph_type == DCCP_PKT_DATAACK)
                                goto found_ok_skb;

                        if (dh->dccph_type == DCCP_PKT_RESET ||
                            dh->dccph_type == DCCP_PKT_CLOSE) {
                                dccp_pr_debug("found fin ok!\n");
                                goto found_fin_ok;
                        }
                        dccp_pr_debug("packet_type=%s\n",
                                      dccp_packet_name(dh->dccph_type));
                        BUG_TRAP(flags & MSG_PEEK);
                        skb = skb->next;
                } while (skb != (struct sk_buff *)&sk->sk_receive_queue);

                /* Well, if we have backlog, try to process it now. */
                if (copied >= target && !sk->sk_backlog.tail)
                        break;

                if (copied) {
                        if (sk->sk_err ||
                            sk->sk_state == DCCP_CLOSED ||
                            (sk->sk_shutdown & RCV_SHUTDOWN) ||
                            !timeo ||
                            signal_pending(current) ||
                            (flags & MSG_PEEK))
                                break;
                } else {
                        if (sock_flag(sk, SOCK_DONE))
                                break;

                        if (sk->sk_err) {
                                copied = sock_error(sk);
                                break;
                        }

                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;

                        if (sk->sk_state == DCCP_CLOSED) {
                                if (!sock_flag(sk, SOCK_DONE)) {
                                        /* This occurs when user tries to read
                                         * from a never connected socket.
                                         */
                                        copied = -ENOTCONN;
                                        break;
                                }
                                break;
                        }

                        if (!timeo) {
                                copied = -EAGAIN;
                                break;
                        }

                        if (signal_pending(current)) {
                                copied = sock_intr_errno(timeo);
                                break;
                        }
                }

                /* FIXME: cleanup_rbuf(sk, copied); */

                if (copied >= target) {
                        /* Do not sleep, just process backlog. */
                        release_sock(sk);
                        lock_sock(sk);
                } else
                        sk_wait_data(sk, &timeo);

                continue;

        found_ok_skb:
                /* Ok so how much can we use? */
                used = skb->len - offset;
                if (len < used)
                        used = len;

                if (!(flags & MSG_TRUNC)) {
                        err = skb_copy_datagram_iovec(skb, offset,
                                                      msg->msg_iov, used);
                        if (err) {
                                /* Exception. Bailout! */
                                if (!copied)
                                        copied = -EFAULT;
                                break;
                        }
                }

                copied += used;
                len -= used;

                /* FIXME: tcp_rcv_space_adjust(sk); */

/* skip_copy: */
                if (used + offset < skb->len)
                        continue;

                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                continue;
        found_fin_ok:
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;
        } while (len > 0);

        /* According to UNIX98, msg_name/msg_namelen are ignored
         * on connected socket. I was just happy when found this 8) --ANK
         */

        /* Clean up data we have read: This will do ACK frames. */
        /* FIXME: cleanup_rbuf(sk, copied); */

        release_sock(sk);
        return copied;

out:
        release_sock(sk);
        return err;

recv_urg:
        /* FIXME: err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len); */
        goto out;
}

static int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start
                 * see tcp_listen_start
                 */
                err = dccp_listen_start(sk);
                if (err)
                        goto out;
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;

out:
        release_sock(sk);
        return err;
}

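/*
 * Maps the current socket state to the state to move to when the socket is
 * closed.  The DCCP_ACTION_FIN bit flags the states from which an active
 * close must also transmit a Close packet (see dccp_close_state() below).
 */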
static const unsigned char dccp_new_state[] = {
        /* current state:   new state:      action:     */
        [0]               = DCCP_CLOSED,
        [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_REQUESTING] = DCCP_CLOSED,
        [DCCP_PARTOPEN]   = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_LISTEN]     = DCCP_CLOSED,
        [DCCP_RESPOND]    = DCCP_CLOSED,
        [DCCP_CLOSING]    = DCCP_CLOSED,
        [DCCP_TIME_WAIT]  = DCCP_CLOSED,
        [DCCP_CLOSED]     = DCCP_CLOSED,
};

static int dccp_close_state(struct sock *sk)
{
        const int next = dccp_new_state[sk->sk_state];
        const int ns = next & DCCP_STATE_MASK;

        if (ns != sk->sk_state)
                dccp_set_state(sk, ns);

        return next & DCCP_ACTION_FIN;
}

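/*
 * dccp_close  -  active close of a socket: flush whatever is still queued
 * for reading, send a Close if the state table above requires one, wait for
 * the closing handshake (bounded by @timeout) and finally orphan the socket
 * so the protocol can finish tearing it down.
 */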
void dccp_close(struct sock *sk, long timeout)
{
        struct sk_buff *skb;

        lock_sock(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        /*
         * We need to flush the recv. buffs.  We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        /* FIXME: check for unread data */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                __kfree_skb(skb);
        }

        if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (dccp_close_state(sk)) {
                dccp_send_close(sk);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        release_sock(sk);
        /*
         * Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        BUG_TRAP(!sock_owned_by_user(sk));

        sock_hold(sk);
        sock_orphan(sk);

        if (sk->sk_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        atomic_inc(&dccp_orphan_count);
        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, socket is reprieved until protocol close. */

        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}

void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("entry\n");
}

struct proto_ops inet_dccp_ops = {
        .family         = PF_INET,
        .owner          = THIS_MODULE,
        .release        = inet_release,
        .bind           = inet_bind,
        .connect        = inet_stream_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = inet_accept,
        .getname        = inet_getname,
        .poll           = sock_no_poll,
        .ioctl          = inet_ioctl,
        /* FIXME: work on inet_listen to rename it to sock_common_listen */
        .listen         = inet_dccp_listen,
        .shutdown       = inet_shutdown,
        .setsockopt     = sock_common_setsockopt,
        .getsockopt     = sock_common_getsockopt,
        .sendmsg        = inet_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};

extern struct net_proto_family inet_family_ops;

static struct inet_protosw dccp_v4_protosw = {
        .type           = SOCK_DCCP,
        .protocol       = IPPROTO_DCCP,
        .prot           = &dccp_v4_prot,
        .ops            = &inet_dccp_ops,
        .capability     = -1,
        .no_check       = 0,
        .flags          = 0,
};

/*
 * This is the global socket data structure used for responding to
 * Out-of-the-blue (OOTB) packets. A control sock is created for it
 * at initialization time.
 */
struct socket *dccp_ctl_socket;

static char dccp_ctl_socket_err_msg[] __initdata =
        KERN_ERR "DCCP: Failed to create the control socket.\n";

static int __init dccp_ctl_sock_init(void)
{
        int rc = sock_create_kern(PF_INET, SOCK_DCCP, IPPROTO_DCCP,
                                  &dccp_ctl_socket);
        if (rc < 0)
                printk(dccp_ctl_socket_err_msg);
        else {
                dccp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
                inet_sk(dccp_ctl_socket->sk)->uc_ttl = -1;

                /* Unhash it so that IP input processing does not even
                 * see it; we do not wish this socket to see incoming
                 * packets.
                 */
                dccp_ctl_socket->sk->sk_prot->unhash(dccp_ctl_socket->sk);
        }

        return rc;
}

static void __exit dccp_ctl_sock_exit(void)
{
        if (dccp_ctl_socket != NULL)
                sock_release(dccp_ctl_socket);
}

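/*
 * Allocate the two per-CPU copies of the DCCP MIB counters
 * (dccp_statistics[0] and [1]) used by the SNMP statistics macros.
 */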
static int __init init_dccp_v4_mibs(void)
{
        int rc = -ENOMEM;

        dccp_statistics[0] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[0] == NULL)
                goto out;

        dccp_statistics[1] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[1] == NULL)
                goto out_free_one;

        rc = 0;
out:
        return rc;
out_free_one:
        free_percpu(dccp_statistics[0]);
        dccp_statistics[0] = NULL;
        goto out;
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

static int __init dccp_init(void)
{
        unsigned long goal;
        int ehash_order, bhash_order, i;
        int rc = proto_register(&dccp_v4_prot, 1);

        if (rc)
                goto out;

        rc = -ENOBUFS;
        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out_proto_unregister;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        if (num_physpages >= (128 * 1024))
                goal = num_physpages >> (21 - PAGE_SHIFT);
        else
                goal = num_physpages >> (23 - PAGE_SHIFT);

        if (thash_entries)
                goal = (thash_entries *
                        sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
                                        sizeof(struct inet_ehash_bucket);
                dccp_hashinfo.ehash_size >>= 1;
                while (dccp_hashinfo.ehash_size &
                       (dccp_hashinfo.ehash_size - 1))
                        dccp_hashinfo.ehash_size--;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                printk(KERN_CRIT "Failed to allocate DCCP "
                                 "established hash table\n");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
                rwlock_init(&dccp_hashinfo.ehash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
        }

        bhash_order = ehash_order;

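        /*
         * Size the bind hash table starting from the same order as the
         * established hash, but shrink the order while the resulting table
         * would exceed 64K buckets.
         */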
        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                        sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
                goto out_free_dccp_ehash;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        if (init_dccp_v4_mibs())
                goto out_free_dccp_bhash;

        rc = -EAGAIN;
        if (inet_add_protocol(&dccp_protocol, IPPROTO_DCCP))
                goto out_free_dccp_v4_mibs;

        inet_register_protosw(&dccp_v4_protosw);

        rc = dccp_ctl_sock_init();
        if (rc)
                goto out_unregister_protosw;
out:
        return rc;
out_unregister_protosw:
        inet_unregister_protosw(&dccp_v4_protosw);
        inet_del_protocol(&dccp_protocol, IPPROTO_DCCP);
out_free_dccp_v4_mibs:
        free_percpu(dccp_statistics[0]);
        free_percpu(dccp_statistics[1]);
        dccp_statistics[0] = dccp_statistics[1] = NULL;
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
        dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
        dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_hashinfo.bind_bucket_cachep = NULL;
out_proto_unregister:
        proto_unregister(&dccp_v4_prot);
        goto out;
}

static const char dccp_del_proto_err_msg[] __exitdata =
        KERN_ERR "can't remove dccp net_protocol\n";

static void __exit dccp_fini(void)
{
        /* Free the control endpoint. */
        dccp_ctl_sock_exit();

        inet_unregister_protosw(&dccp_v4_protosw);

        if (inet_del_protocol(&dccp_protocol, IPPROTO_DCCP) < 0)
                printk(dccp_del_proto_err_msg);

        proto_unregister(&dccp_v4_prot);

        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
}

module_init(dccp_init);
module_exit(dccp_fini);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Control Protocol");