/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

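/*
 * Note: dccp_skb_entail() parks @skb at sk_send_head so that it can later be
 * resent via dccp_retransmit_skb(); only packet types which may need to be
 * retransmitted (Request, Close, CloseReq) are queued this way.
 */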
static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48-bit sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);
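
		/*
		 * Note: sequence numbers are 48 bits wide, so ADD48() wraps
		 * modulo 2^48 (RFC 4340, 7.1). All packet types except Data
		 * and Request carry an Acknowledgement Number; set_ack is
		 * cleared in the corresponding arms of the switch below.
		 */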
		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}
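
		/*
		 * Option processing may fail, e.g. for lack of header space.
		 * In that case the packet is dropped here: GSS has not been
		 * committed yet (see above), so the sequence space stays
		 * consistent.
		 */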
		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->sport;
		dh->dccph_dport	= inet->dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		/* XXX For now we're using only 48-bit sequence numbers */
		dh->dccph_x	= 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);
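
		/*
		 * Type-specific fixed fields: a Request carries the Service
		 * Code, a Reset carries the Reset Code (cf. RFC 4340, sec. 5).
		 */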
		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, 0, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(skb, 0);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

/**
 * dccp_determine_ccmps - Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 option overhead */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets, as
	 * per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (e.g. Ack Vector which can take up to 255 bytes), it is better
	 * to schedule a separate Ack. Thus we leave headroom for the following:
	 * - 1 byte for Slow Receiver (11.6)
	 * - 6 bytes for Timestamp (13.1)
	 * - 10 bytes for Timestamp Echo (13.3)
	 * - 8 bytes for NDP count (7.7, when activated)
	 * - 6 bytes for Data Checksum (9.3)
	 * - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 */
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);
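
	/*
	 * Worked example: with NDP counts active and no Ack Vector, the
	 * reservation is 1 + 6 + 10 + 8 + 6 = 31 bytes, which roundup()
	 * aligns to 32 (the option area is a multiple of 4 bytes, since
	 * dccph_doff counts 32-bit words).
	 */
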
	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Await CCID send permission
 * @sk: socket to wait for
 * @delay: timeout in jiffies
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
	release_sock(sk);

	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk->sk_sleep, &wait);
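
	/*
	 * Return convention: -1 tells the caller to abort (a signal arrived
	 * or the socket reported an error); otherwise the jiffies left over
	 * from @delay are returned so the caller can adjust its time budget.
	 */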
	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}

/**
 * dccp_xmit_packet - Send data packet under control of CCID
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
	int err, len;
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);

	if (unlikely(skb == NULL))
		return;
	len = skb->len;

	if (sk->sk_state == DCCP_PARTOPEN) {
		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
		/*
		 * See 8.1.5 - Handshake Completion.
		 *
		 * For robustness we resend Confirm options until the client has
		 * entered OPEN. During the initial feature negotiation, the MPS
		 * is smaller than usual, reduced by the Change/Confirm options.
		 */
		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
			dccp_send_ack(sk);
			dccp_feat_list_purge(&dp->dccps_featneg);
		}

		inet_csk_schedule_ack(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else if (dccp_ack_pending(sk)) {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
	}

	err = dccp_transmit_skb(sk, skb);
	if (err)
		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
	/*
	 * Register this one as sent even if an error occurred. To the remote
	 * end a local packet drop is indistinguishable from network loss, i.e.
	 * any local drop will eventually be reported via receiver feedback.
	 */
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

	/*
	 * If the CCID needs to transfer additional header options out-of-band
	 * (e.g. Ack Vectors or feature-negotiation options), it activates this
	 * flag to schedule a Sync. The Sync will automatically incorporate all
	 * currently pending header options, thus clearing the backlog.
	 */
	if (dp->dccps_sync_scheduled)
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

/**
 * dccp_flush_write_queue - Drain queue at end of connection
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}

void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
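
	/*
	 * Unlike dccp_flush_write_queue() above, this path must not sleep:
	 * it may be invoked via the dccps_xmit_timer as well as from process
	 * context, so a CCID-requested delay arms the timer instead of
	 * blocking (see the CCID_PACKET_DELAY case below).
	 */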
	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}

/**
 * dccp_retransmit_skb - Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request in client-REQUEST state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close in node-CLOSING state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;
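
	/*
	 * A clone is sent below so that the original stays at sk_send_head,
	 * available for further retransmissions until acknowledged.
	 */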
	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

	/* Resolve feature dependencies resulting from choice of CCID */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @sk */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:	/* fall through */
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
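
	/*
	 * The Reset Data bytes filled in above describe the offending packet
	 * or option, as suggested by RFC 4340, 5.6 (Data 1..3 of DCCP-Reset).
	 */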
	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0.
	 */
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	icsk->icsk_retransmits = 0;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

	/*
	 * Clear the flag in case the Sync was scheduled for out-of-band data,
	 * such as carrying a long Ack Vector.
	 */
	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;
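
	/*
	 * Only an active close needs local retransmission of the skb: on a
	 * passive close the peer keeps retransmitting its CloseReq until we
	 * answer, so transmitting once (below) is sufficient (RFC 4340, 8.3).
	 */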
	if (active) {
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
		 * the Close/CloseReq to be retransmitted until the CLOSING/
		 * CLOSEREQ state can be left. The initial timeout is 2 RTTs.
		 * Since RTT measurement is done by the CCIDs, there is no easy
		 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
		 * is too low (200ms); we use a high value to avoid unnecessary
		 * retransmissions when the link RTT is > 0.2 seconds.
		 * FIXME: Let main module sample RTTs and use that instead.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	} else
		dccp_transmit_skb(sk, skb);
}