/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
static inline void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const int dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			/* fall through */
		default:
			/*
			 * Only data packets should come through with skb->sk
			 * set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;
		dccp_insert_options(sk, skb);

		skb->h.raw = skb_push(skb, dccp_header_size);
		dh = dccp_hdr(skb);

		/* Build DCCP header and checksum it. */
		memset(dh, 0, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->sport;
		dh->dccph_dport	= inet->dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb->len, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
		err = icsk->icsk_af_ops->queue_xmit(skb, 0);
		if (err <= 0)
			return err;

		/* NET_XMIT_CN is special. It does not guarantee that this
		 * packet is lost. It tells that the device is about to
		 * start to drop packets or already drops some packets of
		 * the same priority and invokes us to send less
		 * aggressively.
		 */
		return err == NET_XMIT_CN ? 0 : err;
	}
	return -ENOBUFS;
}
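/*
 * Illustrative note (added commentary, not from the original source):
 * like TCP's data offset, dccph_doff counts 32-bit words. Assuming the
 * usual on-the-wire sizes -- a 12-byte generic header, a 4-byte
 * extended sequence number header (dccph_x = 1) and an 8-byte ack
 * block -- an option-less DCCP_PKT_DATAACK works out to
 *
 *	dccp_header_size = 12 + 4 + 8 = 24 bytes,
 *	dh->dccph_doff	 = (24 + 0) / 4 = 6 words.
 */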
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len -
		       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/*
	 * FIXME: this should come from the CCID infrastructure, where, say,
	 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now lets
	 * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
	 * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
	 * make it a multiple of 4
	 */
	mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = mss_now;

	return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);
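/*
 * Worked example (added commentary, not from the original source):
 * for IPv4 without IP options, net_header_len is 20 and, assuming the
 * 12-byte generic DCCP header plus the 4-byte extended sequence number
 * header, a 1500-byte PMTU gives
 *
 *	mss_now = 1500 - 20 - 12 - 4 = 1464,
 *
 * and subtracting the rough option estimate above,
 * ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4 = (42 / 4) * 4 = 40 bytes,
 * caches an MSS of 1424 bytes of payload per packet.
 */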
void dccp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, 2, POLL_OUT);

	read_unlock(&sk->sk_callback_lock);
}
/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk: socket to wait for
 * @skb: current skb to pass on for waiting
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
			      long *timeo)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	long delay;
	int rc;

	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					    skb->len);
		if (rc <= 0)
			break;
		delay = msecs_to_jiffies(rc);
		if (delay > *timeo || delay < 0)
			goto do_nonblock;

		sk->sk_write_pending++;
		release_sock(sk);
		*timeo -= schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_nonblock:
	rc = -EAGAIN;
	goto out;
do_interrupted:
	rc = sock_intr_errno(*timeo);
	goto out;
}
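/*
 * Illustrative note (added commentary): the loop above encodes the
 * ccid_hc_tx_send_packet() return convention as used in this file:
 * rc == 0 means the CCID permits sending now, rc < 0 is a hard error
 * propagated to the caller, and rc > 0 is a delay in milliseconds --
 * e.g. rc == 50 sleeps for msecs_to_jiffies(50) jiffies before asking
 * the CCID again.
 */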
int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					 skb->len);

	if (err > 0)
		err = dccp_wait_for_ccid(sk, skb, timeo);

	if (err == 0) {
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		const int len = skb->len;

		if (sk->sk_state == DCCP_PARTOPEN) {
			/* See 8.1.5. Handshake Completion */
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
			dcb->dccpd_type = DCCP_PKT_DATAACK;
		} else if (dccp_ack_pending(sk))
			dcb->dccpd_type = DCCP_PKT_DATAACK;
		else
			dcb->dccpd_type = DCCP_PKT_DATA;

		err = dccp_transmit_skb(sk, skb);
		ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
	} else
		kfree_skb(skb);

	return err;
}
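/*
 * A minimal usage sketch (hypothetical, for illustration only): a
 * sendmsg()-style caller holding the socket lock would do roughly
 *
 *	long timeo = sock_sndtimeo(sk, noblock);
 *	...
 *	err = dccp_write_xmit(sk, skb, &timeo);
 *
 * Either way the skb is consumed: on success it has been handed to
 * dccp_transmit_skb(), and on failure dccp_write_xmit() has already
 * freed it.
 */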
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	return dccp_transmit_skb(sk, (skb_cloned(skb) ?
				      pskb_copy(skb, GFP_ATOMIC) :
				      skb_clone(skb, GFP_ATOMIC)));
}
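/*
 * Design note (added commentary): a cloned skb shares its data buffer
 * with the copy sitting on the retransmit queue, so building a fresh
 * header in place could corrupt that queued copy; pskb_copy() gives us
 * a private header area instead. An unshared skb can take the cheaper
 * skb_clone() path.
 */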
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const int dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
					       dccp_header_size, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

	skb->dst = dst_clone(dst);
	skb->csum = 0;

	dreq = dccp_rsk(req);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;
	dccp_insert_options(sk, skb);

	skb->h.raw = skb_push(skb, dccp_header_size);
	dh = dccp_hdr(skb);

	memset(dh, 0, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;

	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_make_response);
static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				       const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const int dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
					       dccp_header_size, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

	skb->dst = dst_clone(dst);
	skb->csum = 0;

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq	   = dp->dccps_gss;
	dccp_insert_options(sk, skb);

	skb->h.raw = skb_push(skb, dccp_header_size);
	dh = dccp_hdr(skb);

	memset(dh, 0, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_sk(sk)->dport;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_x	= 1;

	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;
	inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_sk_rebuild_header(sk);

	if (err == 0) {
		struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
						      code);
		if (skb != NULL) {
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0);
			if (err == NET_XMIT_CN)
				err = 0;
		}
	}

	return err;
}
/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	dccp_update_gss(sk, dp->dccps_iss);
	/*
	 * SWL and AWL are initially adjusted so that they are not less than
	 * the initial Sequence Numbers received and sent, respectively:
	 *	SWL := max(GSR + 1 - floor(W/4), ISR),
	 *	AWL := max(GSS - W' + 1, ISS).
	 * These adjustments MUST be applied only at the beginning of the
	 * connection.
	 */
	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));

	icsk->icsk_retransmits = 0;
}
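/*
 * Worked example (added commentary, not from the original source):
 * with the RFC 4340 default sequence window W' = 100 and an initial
 * sequence number ISS = 1000, GSS == ISS at this point, so
 *
 *	AWL = max(GSS - W' + 1, ISS) = max(901, 1000) = 1000,
 *
 * i.e. the Acknowledgement Window is clamped so that it never starts
 * below the first sequence number this endpoint has actually sent.
 */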
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_connect_init(sk);

	skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_DCCP_HEADER);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
	skb->csum = 0;

	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);
void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, MAX_DCCP_HEADER);
		skb->csum = 0;
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
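/*
 * Illustrative scenario (added commentary): if an earlier delayed-ACK
 * timer is already pending -- say it expires at jiffies + HZ -- the
 * time_before() check above keeps that earlier deadline rather than
 * pushing it out to jiffies + 2 * HZ; and if the timer handler found
 * the socket locked (icsk_ack.blocked), the ACK is sent synchronously
 * here instead of being re-armed.
 */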
void dccp_send_sync(struct sock *sk, const u64 seq,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

	if (skb == NULL)
		/* FIXME: how to make sure the sync is sent? */
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_DCCP_HEADER);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_seq = seq;

	dccp_transmit_skb(sk, skb);
}
/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

	if (active) {
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
	} else
		dccp_transmit_skb(sk, skb);
}