/*
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
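
/* Put @skb on sk_send_head so that dccp_retransmit_skb() can resend it. */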
static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
        skb_set_owner_w(skb, sk);
        WARN_ON(sk->sk_send_head);
        sk->sk_send_head = skb;
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                const struct inet_sock *inet = inet_sk(sk);
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48-bit sequence numbers */
                const u32 dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                             dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;
                /*
                 * Increment GSS here already in case the option code needs it.
                 * Update GSS for real only if option processing below succeeds.
                 */
                dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        /* fall through */
                case DCCP_PKT_DATAACK:
                case DCCP_PKT_RESET:
                        break;

                case DCCP_PKT_REQUEST:
                        set_ack = 0;
                        /* Use ISS on the first (non-retransmitted) Request. */
                        if (icsk->icsk_retransmits == 0)
                                dcb->dccpd_seq = dp->dccps_iss;
                        /* fall through */

                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_ack_seq;
                        /* fall through */
                default:
                        /*
                         * Set owner/destructor: some skbs are allocated via
                         * alloc_skb (e.g. when retransmission may happen).
                         * Only Data, DataAck, and Reset packets should come
                         * through here with skb->sk set.
                         */
                        WARN_ON(skb->sk);
                        skb_set_owner_w(skb, sk);
                        break;
                }

                if (dccp_insert_options(sk, skb)) {
                        kfree_skb(skb);
                        return -EPROTO;
                }

                /* Build DCCP header and checksum it. */
                dh = dccp_zeroed_hdr(skb, dccp_header_size);
                dh->dccph_type = dcb->dccpd_type;
                dh->dccph_sport = inet->inet_sport;
                dh->dccph_dport = inet->inet_dport;
                dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                dh->dccph_cscov = dp->dccps_pcslen;
                /* XXX For now we're using only 48-bit sequence numbers */
                dh->dccph_x = 1;

                dccp_update_gss(sk, dcb->dccpd_seq);
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dp->dccps_service;
                        /*
                         * Limit Ack window to ISS <= P.ackno <= GSS, so that
                         * only Responses to Requests we sent are considered.
                         */
                        dp->dccps_awl = dp->dccps_iss;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                icsk->icsk_af_ops->send_check(sk, skb);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                err = icsk->icsk_af_ops->queue_xmit(skb);
                return net_xmit_eval(err);
        }
        return -ENOBUFS;
}

/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
        const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

        if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
                return 0;
        return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        u32 ccmps = dccp_determine_ccmps(dp);
        u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

        /* Account for header lengths and IPv4/v6 option overhead */
        cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
                    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

        /*
         * Leave enough headroom for common DCCP header options.
         * This only considers options which may appear on DCCP-Data packets,
         * as per table 3 in RFC 4340, 5.8. When running out of space for other
         * options (e.g. Ack Vector which can take up to 255 bytes), it is
         * better to schedule a separate Ack. Thus we leave headroom for the
         * following:
         *  - 1 byte for Slow Receiver (11.6)
         *  - 6 bytes for Timestamp (13.1)
         *  - 10 bytes for Timestamp Echo (13.3)
         *  - 8 bytes for NDP count (7.7, when activated)
         *  - 6 bytes for Data Checksum (9.3)
         *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
         */
        cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
                           (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

        /* And store cached results */
        icsk->icsk_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = cur_mps;

        return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);
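
/*
 * Worked example, assuming IPv4 without IP options (net_header_len = 20,
 * icsk_ext_hdr_len = 0), pmtu = 1500, no CCMPS limit, NDP counts inactive
 * and Ack Vectors disabled:
 *   1500 - (20 + 0 + 12 + 4)          = 1464 bytes after header overhead;
 *   1464 - roundup(1 + 6 + 10 + 6, 4) = 1464 - 24 = 1440 bytes for the MPS.
 */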

void dccp_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible(&wq->wait);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

        rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid  -  Wait for ccid to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   current skb to pass on for waiting
 * @delay: sleep timeout in milliseconds (> 0)
 * This function is called by default when the socket is closed, and
 * when a non-zero linger time is set on the socket.
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
{
        struct dccp_sock *dp = dccp_sk(sk);
        DEFINE_WAIT(wait);
        unsigned long jiffdelay;
        int rc;

        do {
                dccp_pr_debug("delayed send by %d msec\n", delay);
                jiffdelay = msecs_to_jiffies(delay);

                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

                /* release the socket lock while we sleep */
                sk->sk_write_pending++;
                release_sock(sk);
                schedule_timeout(jiffdelay);
                lock_sock(sk);
                sk->sk_write_pending--;

                if (signal_pending(current)) {
                        rc = -EINTR;    /* interrupted: abort the wait */
                        break;
                }

                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
        } while ((delay = rc) > 0);

        finish_wait(sk_sleep(sk), &wait);
        return rc;
}

/**
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
        int err, len;
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);

        if (unlikely(skb == NULL))
                return;
        len = skb->len;

        if (sk->sk_state == DCCP_PARTOPEN) {
                const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
                /*
                 * See 8.1.5 - Handshake Completion.
                 *
                 * For robustness we resend Confirm options until the client has
                 * entered OPEN. During the initial feature negotiation, the MPS
                 * is smaller than usual, reduced by the Change/Confirm options.
                 */
                if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
                        DCCP_WARN("Payload too large (%d) for featneg.\n", len);
                        dccp_send_ack(sk);
                        dccp_feat_list_purge(&dp->dccps_featneg);
                }

                inet_csk_schedule_ack(sk);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                          inet_csk(sk)->icsk_rto,
                                          DCCP_RTO_MAX);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else if (dccp_ack_pending(sk)) {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
        }

        err = dccp_transmit_skb(sk, skb);
        if (err)
                dccp_pr_debug("transmit_skb() returned err=%d\n", err);
        /*
         * Register this one as sent even if an error occurred. To the remote
         * end a local packet drop is indistinguishable from network loss, i.e.
         * any local drop will eventually be reported via receiver feedback.
         */
        ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
}
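
/*
 * dccp_write_xmit  -  Drain the write queue under control of the TX CCID
 * @block: if non-zero, wait for CCID send permission in process context;
 *         otherwise arm dccps_xmit_timer and revisit the queue later
 */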
void dccp_write_xmit(struct sock *sk, int block)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;

        while ((skb = skb_peek(&sk->sk_write_queue))) {
                int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

                switch (ccid_packet_dequeue_eval(rc)) {
                case CCID_PACKET_WILL_DEQUEUE_LATER:
                        return;
                case CCID_PACKET_DELAY:
                        if (!block) {
                                sk_reset_timer(sk, &dp->dccps_xmit_timer,
                                               msecs_to_jiffies(rc) + jiffies);
                                return;
                        }
                        rc = dccp_wait_for_ccid(sk, skb, rc);
                        if (rc && rc != -EINTR) {
                                DCCP_BUG("err=%d after dccp_wait_for_ccid", rc);
                                skb_dequeue(&sk->sk_write_queue);
                                kfree_skb(skb);
                        }
                        break;
                case CCID_PACKET_SEND_AT_ONCE:
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
                        skb_dequeue(&sk->sk_write_queue);
                        kfree_skb(skb);
                        dccp_pr_debug("packet discarded due to err=%d\n", rc);
                }
        }
}

/**
 * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close    in   node-CLOSING  state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
        WARN_ON(sk->sk_send_head == NULL);

        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
                return -EHOSTUNREACH; /* Routing failure or similar. */

        /* this count is used to distinguish original and retransmitted skb */
        inet_csk(sk)->icsk_retransmits++;

        return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}
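
/*
 * dccp_make_response  -  Build a Response to a received client Request;
 * returns the new skb, or NULL if allocation or option insertion fails.
 */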
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        struct dccp_request_sock *dreq;
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        skb_dst_set(skb, dst_clone(dst));

        dreq = dccp_rsk(req);
        if (inet_rsk(req)->acked)       /* increase ISS upon retransmission */
                dccp_inc_seqno(&dreq->dreq_iss);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss;

        /* Resolve feature dependencies resulting from choice of CCID */
        if (dccp_feat_server_ccid_dependencies(dreq))
                goto response_failed;

        if (dccp_insert_options_rsk(dreq, skb))
                goto response_failed;

        /* Build and checksum header */
        dh = dccp_zeroed_hdr(skb, dccp_header_size);

        dh->dccph_sport = inet_rsk(req)->loc_port;
        dh->dccph_dport = inet_rsk(req)->rmt_port;
        dh->dccph_doff = (dccp_header_size +
                          DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type = DCCP_PKT_RESPONSE;
        dh->dccph_x = 1;
        dccp_hdr_set_seq(dh, dreq->dreq_iss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
        dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

        dccp_csum_outgoing(skb);

        /* We use `acked' to remember that a Response was already sent. */
        inet_rsk(req)->acked = 1;
        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;

response_failed:
        kfree_skb(skb);
        return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @sk */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
        struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
        const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                                       sizeof(struct dccp_hdr_ext) +
                                       sizeof(struct dccp_hdr_reset);
        struct dccp_hdr_reset *dhr;
        struct sk_buff *skb;

        skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        skb_reserve(skb, sk->sk_prot->max_header);

        /* Swap the send and the receive. */
        dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
        dh->dccph_type = DCCP_PKT_RESET;
        dh->dccph_sport = rxdh->dccph_dport;
        dh->dccph_dport = rxdh->dccph_sport;
        dh->dccph_doff = dccp_hdr_reset_len / 4;
        dh->dccph_x = 1;

        dhr = dccp_hdr_reset(skb);
        dhr->dccph_reset_code = dcb->dccpd_reset_code;

        switch (dcb->dccpd_reset_code) {
        case DCCP_RESET_CODE_PACKET_ERROR:
                dhr->dccph_reset_data[0] = rxdh->dccph_type;
                break;
        case DCCP_RESET_CODE_OPTION_ERROR:      /* fall through */
        case DCCP_RESET_CODE_MANDATORY_ERROR:
                memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
                break;
        }
        /*
         * From RFC 4340, 8.3.1:
         *   If P.ackno exists, set R.seqno := P.ackno + 1.
         *   Else set R.seqno := 0.
         */
        if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

        dccp_csum_outgoing(skb);
        return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
        struct sk_buff *skb;
        /*
         * FIXME: what if rebuild_header fails?
         * Should we be doing a rebuild_header here?
         */
        int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

        if (err != 0)
                return err;

        skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOBUFS;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_reset_code = code;

        return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct dccp_sock *dp = dccp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        /* do not connect if feature negotiation setup fails */
        if (dccp_feat_finalise_settings(dccp_sk(sk)))
                return -EPROTO;

        /* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
        dp->dccps_gar = dp->dccps_iss;

        skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

        dccp_skb_entail(sk, skb);
        dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the REQUEST until an answer. */
        icsk->icsk_retransmits = 0;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);
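
/*
 * Transmit an Ack immediately; if no skb can be allocated, fall back to
 * the delayed-Ack timer.
 */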
void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
                                                GFP_ATOMIC);

                if (skb == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers */
                skb_reserve(skb, sk->sk_prot->max_header);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /*
         * FIXME: tune this timer. elapsed time fixes the skew, so no problem
         * with using 2s, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescent senders.
         */
        unsigned long timeout = jiffies + 2 * HZ;

        /* Use new timeout only if there wasn't an older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If delack timer was blocked or is about to expire,
                 * send ACK now.
                 *
                 * FIXME: check the "about to expire" part
                 */
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
                    const enum dccp_pkt_type pkt_type)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */
        struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

        if (skb == NULL) {
                /* FIXME: how to make sure the sync is sent? */
                DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
                return;
        }

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

        dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
        else
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

        if (active) {
                dccp_write_xmit(sk, 1);
                dccp_skb_entail(sk, skb);
                dccp_transmit_skb(sk, skb_clone(skb, prio));
                /*
                 * Retransmission timer for active-close: RFC 4340, 8.3
                 * requires retransmitting the Close/CloseReq until the
                 * CLOSING/CLOSEREQ state can be left. The initial timeout
                 * is 2 RTTs. Since RTT measurement is done by the CCIDs,
                 * there is no easy way to get an RTT sample. The fallback
                 * RTT from RFC 4340, 3.4 is too low (200ms); we use a high
                 * value to avoid unnecessary retransmissions when the link
                 * RTT is > 0.2 seconds.
                 * FIXME: Let main module sample RTTs and use that instead.
                 */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
        } else
                dccp_transmit_skb(sk, skb);
}