/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"
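
/*
 * A packet carrying an acknowledgement has just been sent, so a pending
 * delayed-ACK timer is no longer needed and can be stopped.
 */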
static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                const struct inet_sock *inet = inet_sk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const int dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                             dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;

                dccp_inc_seqno(&dp->dccps_gss);
                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        break;
                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_seq;
                        break;
                }

                dcb->dccpd_seq = dp->dccps_gss;
                dccp_insert_options(sk, skb);

                skb->h.raw = skb_push(skb, dccp_header_size);
                dh = dccp_hdr(skb);
                /*
                 * Data packets are not cloned as they are never
                 * retransmitted.
                 */
                if (skb_cloned(skb))
                        skb_set_owner_w(skb, sk);
                /* Build DCCP header and checksum it. */
                memset(dh, 0, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->sport;
                dh->dccph_dport = inet->dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x     = 1;

                dp->dccps_awh = dp->dccps_gss;
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);
                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dcb->dccpd_service;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }
                dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
                                                      inet->daddr);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                err = ip_queue_xmit(skb, 0);
                if (err <= 0)
                        return err;

                /* NET_XMIT_CN is special. It does not guarantee that this
                 * packet is lost. It tells us that the device is about to
                 * start dropping packets, or already drops some packets of
                 * the same priority, and asks us to send less aggressively.
                 */
                return err == NET_XMIT_CN ? 0 : err;
        }
        return -ENOBUFS;
}
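
/*
 * Compute the MSS for the current path MTU and cache the result: the PMTU
 * minus the network header, the generic DCCP header, the long (48-bit)
 * sequence number extension, any extra transport headers, and a rough
 * per-packet option estimate.
 */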
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct dccp_sock *dp = dccp_sk(sk);
        int mss_now;

        /*
         * FIXME: we really should be using the af_specific thing to support
         *        IPv6.
         * mss_now = pmtu - tp->af_specific->net_header_len -
         *           sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
         */
        mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) -
                  sizeof(struct dccp_hdr_ext);
        /* Now subtract optional transport overhead */
        mss_now -= dp->dccps_ext_header_len;

        /*
         * FIXME: this should come from the CCID infrastructure, where, say,
         * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now lets
         * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
         * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
         * make it a multiple of 4
         */
        mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;

        /* And store cached results */
        dp->dccps_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = mss_now;

        return mss_now;
}
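
/*
 * Worked example for the option estimate above: the listed options sum to
 * 5 + 6 + 10 + 6 + 6 + 6 = 39 bytes, and the "+ 3" rounds that up to the
 * next multiple of 4, so ((39 + 3) / 4) * 4 = 40 bytes are reserved. On a
 * plain 1500-byte IPv4 path with no IP options this leaves
 * 1500 - 20 - sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext) - 40
 * bytes of payload per packet.
 */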
int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, const int len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, len);

        if (err == 0) {
                const struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

                if (sk->sk_state == DCCP_PARTOPEN) {
                        /* See 8.1.5. Handshake Completion */
                        inet_csk_schedule_ack(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  inet_csk(sk)->icsk_rto,
                                                  DCCP_RTO_MAX);
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                /*
                 * FIXME: we really should have a
                 * dccps_ack_pending or use icsk.
                 */
                } else if (inet_csk_ack_scheduled(sk) ||
                           (dp->dccps_options.dccpo_send_ack_vector &&
                            ap->dccpap_buf_ackno != DCCP_MAX_SEQNO + 1 &&
                            ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1))
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                else
                        dcb->dccpd_type = DCCP_PKT_DATA;

                err = dccp_transmit_skb(sk, skb);
                ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
        }

        return err;
}
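
/*
 * A queued skb may already have been cloned for an earlier transmit, in
 * which case its data area is shared and pushing headers into it would
 * corrupt the queued original; so retransmit from a private header copy
 * (pskb_copy) in that case, and from a cheap clone otherwise.
 */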
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (inet_sk_rebuild_header(sk) != 0)
                return -EHOSTUNREACH; /* Routing failure or similar. */

        return dccp_transmit_skb(sk, (skb_cloned(skb) ?
                                      pskb_copy(skb, GFP_ATOMIC) :
                                      skb_clone(skb, GFP_ATOMIC)));
}
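
/*
 * Build the RESPONSE to an incoming REQUEST. This runs on behalf of a
 * request_sock, before a full DCCP socket for the connection exists, so
 * the remote port and the initial sequence/ack numbers come from the
 * request itself rather than from sk.
 */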
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        const int dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
                                               dccp_header_size, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

        skb->dst = dst_clone(dst);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq  = dccp_rsk(req)->dreq_iss;
        dccp_insert_options(sk, skb);

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_rsk(req)->rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;

        dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dccp_rsk(req)->dreq_isr);

        dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
                                              inet_rsk(req)->rmt_addr);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}
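
/*
 * Build a RESET carrying the given reset code. Unlike dccp_make_response()
 * this runs on a full socket: it consumes a sequence number from dccps_gss
 * and acknowledges the greatest sequence number received (dccps_gsr).
 */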
struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                                const enum dccp_reset_codes code)
{
        struct dccp_hdr *dh;
        struct dccp_sock *dp = dccp_sk(sk);
        const int dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
                                               dccp_header_size, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

        skb->dst = dst_clone(dst);

        dccp_inc_seqno(&dp->dccps_gss);

        DCCP_SKB_CB(skb)->dccpd_reset_code = code;
        DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_seq        = dp->dccps_gss;
        dccp_insert_options(sk, skb);

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_sk(sk)->dport;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_x     = 1;

        dccp_hdr_set_seq(dh, dp->dccps_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

        dccp_hdr_reset(skb)->dccph_reset_code = code;

        dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
                                              inet_sk(sk)->daddr);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}
/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        /*
         * FIXME: set dp->{dccps_swh,dccps_swl}, with
         * something like dccp_inc_seq
         */

        icsk->icsk_retransmits = 0;
}
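
/*
 * Active open: queue a REQUEST on sk_send_head, transmit a clone of it,
 * and arm the retransmit timer so the REQUEST is repeated until a
 * RESPONSE arrives (or the retry limit is hit).
 */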
int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct inet_connection_sock *icsk = inet_csk(sk);

        dccp_connect_init(sk);

        skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_DCCP_HEADER);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
        /* FIXME: set service to something meaningful, coming
         * from userspace */
        DCCP_SKB_CB(skb)->dccpd_service = 0;
        skb_set_owner_w(skb, sk);

        BUG_TRAP(sk->sk_send_head == NULL);
        sk->sk_send_head = skb;
        dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the REQUEST until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}
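
/*
 * Send a pure ACK immediately. If the skb allocation fails, fall back to
 * the delayed-ACK timer so the ACK is retried later instead of being
 * silently dropped.
 */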
void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

                if (skb == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers */
                skb_reserve(skb, MAX_DCCP_HEADER);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                skb_set_owner_w(skb, sk);
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);
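
/*
 * Schedule an ACK for later delivery; this closely mirrors what
 * tcp_send_delayed_ack() does with the inet_connection_sock ack state.
 */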
void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /*
         * FIXME: tune this timer. elapsed time fixes the skew, so no problem
         * with using 2s, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescent senders.
         */
        unsigned long timeout = jiffies + 2 * HZ;

        /* Use the new timeout only if there wasn't an older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If the delack timer was blocked or is about to expire,
                 * send the ACK now.
                 *
                 * FIXME: check the "about to expire" part
                 */
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
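
/*
 * Send a SYNC or SYNCACK for the given sequence number; these packets are
 * used to get the endpoints back in step when a packet arrives whose
 * sequence number falls outside the expected window (see the
 * sequence-validity rules of the DCCP specification).
 */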
void dccp_send_sync(struct sock *sk, const u64 seq,
                    const enum dccp_pkt_type pkt_type)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */
        struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

        if (skb == NULL)
                /* FIXME: how to make sure the sync is sent? */
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, MAX_DCCP_HEADER);
        DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_seq = seq;

        skb_set_owner_w(skb, sk);
        dccp_transmit_skb(sk, skb);
}
/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;

        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
                                        DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

        skb_set_owner_w(skb, sk);
        if (active) {
                BUG_TRAP(sk->sk_send_head == NULL);
                sk->sk_send_head = skb;
                dccp_transmit_skb(sk, skb_clone(skb, prio));
        } else
                dccp_transmit_skb(sk, skb);

        ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
        ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
}
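
/*
 * On an active close the CLOSE/CLOSEREQ stays on sk_send_head so it can be
 * retransmitted until the peer answers, and only a clone goes out; on a
 * passive close the skb itself is sent once. Either way, the
 * half-connection CCIDs are torn down afterwards.
 */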