/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const int dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		/*
		 * FIXME: study DCCP_PKT_SYNC[ACK] to see what is the right
		 * thing to do here...
		 */
		dccp_inc_seqno(&dp->dccps_gss);

		dcb->dccpd_seq = dp->dccps_gss;
		dccp_insert_options(sk, skb);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			break;
		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			break;
		}

		skb->h.raw = skb_push(skb, dccp_header_size);
		dh = dccp_hdr(skb);
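		/*
		 * Note that the packet is built back to front: the
		 * dccp_insert_options() call above pushed the option bytes
		 * in front of the payload, and the skb_push() here prepends
		 * the fixed and type-specific headers in front of those
		 * options, giving the on-the-wire order of generic header,
		 * extended sequence number, type-specific fields, options.
		 */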
		/*
		 * Data packets are not cloned as they are never retransmitted
		 */
		if (skb_cloned(skb))
			skb_set_owner_w(skb, sk);

		/* Build DCCP header and checksum it. */
		memset(dh, 0, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->sport;
		dh->dccph_dport	= inet->dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dcb->dccpd_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
						      inet->daddr);

		if (dcb->dccpd_type == DCCP_PKT_ACK ||
		    dcb->dccpd_type == DCCP_PKT_DATAACK)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = ip_queue_xmit(skb, 0);
		if (err <= 0)
			return err;

		/* NET_XMIT_CN is special. It does not guarantee that this
		 * packet is lost. It tells us that the device is about to
		 * start dropping packets, or already drops some packets of
		 * the same priority, and asks us to send less aggressively.
		 */
		return err == NET_XMIT_CN ? 0 : err;
	}
	return -ENOBUFS;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int mss_now;

	/*
	 * FIXME: we really should be using the af_specific thing to support
	 *	  IPv6.
	 * mss_now = pmtu - tp->af_specific->net_header_len -
	 *	     sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
	 */
	mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) -
		  sizeof(struct dccp_hdr_ext);

	/* Now subtract optional transport overhead */
	mss_now -= dp->dccps_ext_header_len;

	/*
	 * FIXME: this should come from the CCID infrastructure, where, say,
	 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now let's
	 * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
	 * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
	 * make it a multiple of 4
	 */

	mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;
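
	/*
	 * Worked example, assuming a 20 byte IPv4 header and 12 + 4 bytes of
	 * generic plus extended DCCP header: the option estimate above is
	 * 5 + 6 + 10 + 6 + 6 + 6 + 3 = 42 bytes, which the integer division
	 * rounds down to 40, so with a 1500 byte MTU and no extension
	 * headers this yields mss_now = 1500 - 20 - 12 - 4 - 40 = 1424.
	 */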

	/* And store cached results */
	dp->dccps_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = mss_now;

	return mss_now;
}

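/*
 * dccp_sync_mss() is called with the current path MTU, e.g. from
 * dccp_connect_init() below; the cached dccps_mss_cache is what the send
 * path can consult to bound the size of outgoing segments.
 */
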
int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, const int len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, len);

	if (err == 0) {
		const struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

		if (sk->sk_state == DCCP_PARTOPEN) {
			/* See 8.1.5.  Handshake Completion */
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
			dcb->dccpd_type = DCCP_PKT_DATAACK;
			/*
			 * FIXME: we really should have a
			 * dccps_ack_pending or use icsk.
			 */
		} else if (inet_csk_ack_scheduled(sk) ||
			   (dp->dccps_options.dccpo_send_ack_vector &&
			    ap->dccpap_buf_ackno != DCCP_MAX_SEQNO + 1 &&
			    ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1))
			dcb->dccpd_type = DCCP_PKT_DATAACK;
		else
			dcb->dccpd_type = DCCP_PKT_DATA;

		err = dccp_transmit_skb(sk, skb);
		ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
	}

	return err;
}

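/*
 * dccp_retransmit_skb() re-sends a packet left on the write queue, in
 * practice the REQUEST kept in sk_send_head by dccp_connect() below.
 * dccp_transmit_skb() consumes the skb it is handed, so pass it a clone,
 * or a private copy via pskb_copy() when the queued skb is itself already
 * cloned and its header area is therefore shared.
 */
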
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (inet_sk_rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	return dccp_transmit_skb(sk, (skb_cloned(skb) ?
				      pskb_copy(skb, GFP_ATOMIC) :
				      skb_clone(skb, GFP_ATOMIC)));
}

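/*
 * dccp_make_response() is the passive-open counterpart of the REQUEST
 * sent by dccp_connect(): it builds a RESPONSE packet for a request_sock,
 * much like TCP's tcp_make_synack(), and leaves the actual transmission
 * to the caller.
 */
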
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	const int dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
					       dccp_header_size, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

	skb->dst = dst_clone(dst);
	skb->csum = 0;

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_rsk(req)->dreq_iss;
	dccp_insert_options(sk, skb);

	skb->h.raw = skb_push(skb, dccp_header_size);

	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;

	dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dccp_rsk(req)->dreq_isr);

	dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
					      inet_rsk(req)->rmt_addr);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

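/*
 * Like dccp_make_response(), dccp_make_reset() only builds the packet;
 * the caller transmits it. The reason for the reset travels both in the
 * skb control block, where dccp_transmit_skb() can pick it up, and in
 * the type-specific part of the header set below.
 */
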
struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const int dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
					       dccp_header_size, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

	skb->dst = dst_clone(dst);
	skb->csum = 0;

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq	   = dp->dccps_gss;
	dccp_insert_options(sk, skb);

	skb->h.raw = skb_push(skb, dccp_header_size);

	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_sk(sk)->dport;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_x	= 1;

	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;

	dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
					      inet_sk(sk)->daddr);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/*
	 * FIXME: set dp->{dccps_swh,dccps_swl}, with
	 * something like dccp_inc_seq
	 */

	icsk->icsk_retransmits = 0;
}

int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_connect_init(sk);

	skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_DCCP_HEADER);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
	/* FIXME: set service to something meaningful, coming
	 * from userspace. */
	DCCP_SKB_CB(skb)->dccpd_service = 0;
	skb->csum = 0;
	skb_set_owner_w(skb, sk);

	BUG_TRAP(sk->sk_send_head == NULL);
	sk->sk_send_head = skb;
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

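/*
 * Note that dccp_connect() transmits a clone and leaves the original
 * REQUEST on sk_send_head: if no RESPONSE arrives, the retransmit timer
 * armed above re-sends it via dccp_retransmit_skb().
 */
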
void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, MAX_DCCP_HEADER);
		skb->csum = 0;
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		skb_set_owner_w(skb, sk);
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

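/*
 * The delayed-ACK logic above is modelled on tcp_send_delayed_ack(): if
 * the timer was blocked because the socket was locked, the ACK goes out
 * immediately; otherwise the earlier of the two deadlines is kept.
 */
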
void dccp_send_sync(struct sock *sk, const u64 seq,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

	if (skb == NULL)
		/* FIXME: how to make sure the sync is sent? */
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_DCCP_HEADER);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_seq = seq;

	skb_set_owner_w(skb, sk);
	dccp_transmit_skb(sk, skb);
}

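/*
 * Per the DCCP specification, a SYNC is sent in response to a packet
 * whose sequence number falls outside the valid window, and the peer
 * answers it with a SYNCACK; pkt_type selects between the two here.
 */
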
/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	/* Socket is locked, keep trying until memory is available. */
	for (;;) {
		skb = alloc_skb(sk->sk_prot->max_header, GFP_KERNEL);
		if (skb != NULL)
			break;
		yield();
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

	skb_set_owner_w(skb, sk);
	dccp_transmit_skb(sk, skb);

	ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
}