1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001-2003 Intel Corp.
7 * This file is part of the SCTP kernel implementation
9 * These functions implement the sctp_outq class. The outqueue handles
10 * bundling and queueing of outgoing SCTP chunks.
12 * This SCTP implementation is free software;
13 * you can redistribute it and/or modify it under the terms of
14 * the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
18 * This SCTP implementation is distributed in the hope that it
19 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20 * ************************
21 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22 * See the GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with GNU CC; see the file COPYING. If not, write to
26 * the Free Software Foundation, 59 Temple Place - Suite 330,
27 * Boston, MA 02111-1307, USA.
29 * Please send any bug reports or fixes you make to the
31 * lksctp developers <lksctp-developers@lists.sourceforge.net>
33 * Or submit a bug report through the following website:
34 * http://www.sf.net/projects/lksctp
36 * Written or modified by:
37 * La Monte H.P. Yarroll <piggy@acm.org>
38 * Karl Knutson <karl@athena.chicago.il.us>
39 * Perry Melange <pmelange@null.cc.uic.edu>
40 * Xingang Guo <xingang.guo@intel.com>
41 * Hui Huang <hui.huang@nokia.com>
42 * Sridhar Samudrala <sri@us.ibm.com>
43 * Jon Grimm <jgrimm@us.ibm.com>
45 * Any bugs reported given to us we will try to fix... any fixes shared will
46 * be incorporated into the next SCTP release.
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
51 #include <linux/types.h>
52 #include <linux/list.h> /* For struct list_head */
53 #include <linux/socket.h>
55 #include <linux/slab.h>
56 #include <net/sock.h> /* For skb_set_owner_w */
58 #include <net/sctp/sctp.h>
59 #include <net/sctp/sm.h>
61 /* Declare internal functions here. */
62 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
63 static void sctp_check_transmitted(struct sctp_outq *q,
64 struct list_head *transmitted_queue,
65 struct sctp_transport *transport,
66 struct sctp_sackhdr *sack,
67 __u32 *highest_new_tsn);
69 static void sctp_mark_missing(struct sctp_outq *q,
70 struct list_head *transmitted_queue,
71 struct sctp_transport *transport,
72 __u32 highest_new_tsn,
73 int count_of_newacks);
75 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
77 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
79 /* Add data to the front of the queue. */
80 static inline void sctp_outq_head_data(struct sctp_outq *q,
81 struct sctp_chunk *ch)
83 list_add(&ch->list, &q->out_chunk_list);
84 q->out_qlen += ch->skb->len;
87 /* Take data from the front of the queue. */
88 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
90 struct sctp_chunk *ch = NULL;
92 if (!list_empty(&q->out_chunk_list)) {
93 struct list_head *entry = q->out_chunk_list.next;
95 ch = list_entry(entry, struct sctp_chunk, list);
97 q->out_qlen -= ch->skb->len;
101 /* Add data chunk to the end of the queue. */
102 static inline void sctp_outq_tail_data(struct sctp_outq *q,
103 struct sctp_chunk *ch)
105 list_add_tail(&ch->list, &q->out_chunk_list);
106 q->out_qlen += ch->skb->len;
110 * SFR-CACC algorithm:
111 * D) If count_of_newacks is greater than or equal to 2
112 * and t was not sent to the current primary then the
113 * sender MUST NOT increment missing report count for t.
115 static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
116 struct sctp_transport *transport,
117 int count_of_newacks)
119 if (count_of_newacks >= 2 && transport != primary)
125 * SFR-CACC algorithm:
126 * F) If count_of_newacks is less than 2, let d be the
127 * destination to which t was sent. If cacc_saw_newack
128 * is 0 for destination d, then the sender MUST NOT
129 * increment missing report count for t.
131 static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
132 int count_of_newacks)
134 if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack)
140 * SFR-CACC algorithm:
141 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
142 * execute steps C, D, F.
144 * C has been implemented in sctp_outq_sack
146 static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
147 struct sctp_transport *transport,
148 int count_of_newacks)
150 if (!primary->cacc.cycling_changeover) {
151 if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
153 if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
161 * SFR-CACC algorithm:
162 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
163 * than next_tsn_at_change of the current primary, then
164 * the sender MUST NOT increment missing report count
167 static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
169 if (primary->cacc.cycling_changeover &&
170 TSN_lt(tsn, primary->cacc.next_tsn_at_change))
176 * SFR-CACC algorithm:
177 * 3) If the missing report count for TSN t is to be
178 * incremented according to [RFC2960] and
179 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
180 * then the sender MUST further execute steps 3.1 and
181 * 3.2 to determine if the missing report count for
182 * TSN t SHOULD NOT be incremented.
184 * 3.3) If 3.1 and 3.2 do not dictate that the missing
185 * report count for t should not be incremented, then
186 * the sender SHOULD increment missing report count for
187 * t (according to [RFC2960] and [SCTP_STEWART-2002]).
189 static inline int sctp_cacc_skip(struct sctp_transport *primary,
190 struct sctp_transport *transport,
191 int count_of_newacks,
194 if (primary->cacc.changeover_active &&
195 (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
196 sctp_cacc_skip_3_2(primary, tsn)))
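/*
 * A brief worked example of the skip rules above, using assumed values:
 * with CHANGEOVER_ACTIVE set on the primary, CYCLING_CHANGEOVER clear,
 * count_of_newacks == 2 and TSN t last sent on a non-primary transport,
 * rule 3.1.D applies, so sctp_cacc_skip(primary, transport, 2, tsn)
 * returns nonzero and the caller (sctp_mark_missing) leaves the missing
 * report count for t untouched.  If instead count_of_newacks were 1 and
 * cacc_saw_newack were set on t's destination, neither 3.1.D nor 3.1.F
 * would apply and the count would be incremented as usual.
 */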
201 /* Initialize an existing sctp_outq. This does the boring stuff.
202 * You still need to define handlers if you really want to DO
203 * something with this structure...
205 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
208 INIT_LIST_HEAD(&q->out_chunk_list);
209 INIT_LIST_HEAD(&q->control_chunk_list);
210 INIT_LIST_HEAD(&q->retransmit);
211 INIT_LIST_HEAD(&q->sacked);
212 INIT_LIST_HEAD(&q->abandoned);
215 q->outstanding_bytes = 0;
223 /* Free the outqueue structure and any related pending chunks.
225 void sctp_outq_teardown(struct sctp_outq *q)
227 struct sctp_transport *transport;
228 struct list_head *lchunk, *temp;
229 struct sctp_chunk *chunk, *tmp;
231 /* Throw away unacknowledged chunks. */
232 list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
234 while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
235 chunk = list_entry(lchunk, struct sctp_chunk,
237 /* Mark as part of a failed message. */
238 sctp_chunk_fail(chunk, q->error);
239 sctp_chunk_free(chunk);
243 /* Throw away chunks that have been gap ACKed. */
244 list_for_each_safe(lchunk, temp, &q->sacked) {
245 list_del_init(lchunk);
246 chunk = list_entry(lchunk, struct sctp_chunk,
248 sctp_chunk_fail(chunk, q->error);
249 sctp_chunk_free(chunk);
252 /* Throw away any chunks in the retransmit queue. */
253 list_for_each_safe(lchunk, temp, &q->retransmit) {
254 list_del_init(lchunk);
255 chunk = list_entry(lchunk, struct sctp_chunk,
257 sctp_chunk_fail(chunk, q->error);
258 sctp_chunk_free(chunk);
261 /* Throw away any chunks that are in the abandoned queue. */
262 list_for_each_safe(lchunk, temp, &q->abandoned) {
263 list_del_init(lchunk);
264 chunk = list_entry(lchunk, struct sctp_chunk,
266 sctp_chunk_fail(chunk, q->error);
267 sctp_chunk_free(chunk);
270 /* Throw away any leftover data chunks. */
271 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
273 /* Mark as send failure. */
274 sctp_chunk_fail(chunk, q->error);
275 sctp_chunk_free(chunk);
280 /* Throw away any leftover control chunks. */
281 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
282 list_del_init(&chunk->list);
283 sctp_chunk_free(chunk);
287 /* Free the outqueue structure and any related pending chunks. */
288 void sctp_outq_free(struct sctp_outq *q)
290 /* Throw away leftover chunks. */
291 sctp_outq_teardown(q);
293 /* If we were kmalloc()'d, free the memory. */
298 /* Put a new chunk in an sctp_outq. */
299 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
303 SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
304 q, chunk, chunk && chunk->chunk_hdr ?
305 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
308 /* If it is data, queue it up, otherwise, send it
311 if (sctp_chunk_is_data(chunk)) {
312 /* Is it OK to queue data chunks? */
313 /* From 9. Termination of Association
315 * When either endpoint performs a shutdown, the
316 * association on each peer will stop accepting new
317 * data from its user and only deliver data in queue
318 * at the time of sending or receiving the SHUTDOWN
321 switch (q->asoc->state) {
322 case SCTP_STATE_EMPTY:
323 case SCTP_STATE_CLOSED:
324 case SCTP_STATE_SHUTDOWN_PENDING:
325 case SCTP_STATE_SHUTDOWN_SENT:
326 case SCTP_STATE_SHUTDOWN_RECEIVED:
327 case SCTP_STATE_SHUTDOWN_ACK_SENT:
328 /* Cannot send after transport endpoint shutdown */
333 SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
334 q, chunk, chunk && chunk->chunk_hdr ?
335 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
338 sctp_outq_tail_data(q, chunk);
339 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
340 SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
342 SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
347 list_add_tail(&chunk->list, &q->control_chunk_list);
348 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
355 error = sctp_outq_flush(q, 0);
360 /* Insert a chunk into the sorted list based on the TSNs. The retransmit list
361 * and the abandoned list are in ascending order.
363 static void sctp_insert_list(struct list_head *head, struct list_head *new)
365 struct list_head *pos;
366 struct sctp_chunk *nchunk, *lchunk;
370 nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
371 ntsn = ntohl(nchunk->subh.data_hdr->tsn);
373 list_for_each(pos, head) {
374 lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
375 ltsn = ntohl(lchunk->subh.data_hdr->tsn);
376 if (TSN_lt(ntsn, ltsn)) {
377 list_add(new, pos->prev);
383 list_add_tail(new, head);
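/*
 * A note on the comparison used above: TSN_lt() works on 32-bit serial
 * numbers, so the "ascending" order survives TSN wrap-around.  As an
 * assumed example, TSN_lt(0xfffffffe, 0x00000001) is true, so a chunk
 * carrying TSN 0xfffffffe sorts ahead of one carrying TSN 1 even though
 * it is numerically larger.
 */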
386 /* Mark all the eligible packets on a transport for retransmission. */
387 void sctp_retransmit_mark(struct sctp_outq *q,
388 struct sctp_transport *transport,
391 struct list_head *lchunk, *ltemp;
392 struct sctp_chunk *chunk;
394 /* Walk through the specified transmitted queue. */
395 list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
396 chunk = list_entry(lchunk, struct sctp_chunk,
399 /* If the chunk is abandoned, move it to abandoned list. */
400 if (sctp_chunk_abandoned(chunk)) {
401 list_del_init(lchunk);
402 sctp_insert_list(&q->abandoned, lchunk);
404 /* If this chunk has not been previously acked,
405 * stop considering it 'outstanding'. Our peer
406 * will most likely never see it since it will
407 * not be retransmitted
409 if (!chunk->tsn_gap_acked) {
410 if (chunk->transport)
411 chunk->transport->flight_size -=
412 sctp_data_size(chunk);
413 q->outstanding_bytes -= sctp_data_size(chunk);
414 q->asoc->peer.rwnd += (sctp_data_size(chunk) +
415 sizeof(struct sk_buff));
420 /* If we are doing retransmission due to a timeout or pmtu
421 * discovery, only the chunks that are not yet acked should
422 * be added to the retransmit queue.
424 if ((reason == SCTP_RTXR_FAST_RTX &&
425 (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
426 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
427 /* RFC 2960 6.2.1 Processing a Received SACK
429 * C) Any time a DATA chunk is marked for
430 * retransmission (via either T3-rtx timer expiration
431 * (Section 6.3.3) or via fast retransmit
432 * (Section 7.2.4)), add the data size of those
433 * chunks to the rwnd.
435 q->asoc->peer.rwnd += (sctp_data_size(chunk) +
436 sizeof(struct sk_buff));
437 q->outstanding_bytes -= sctp_data_size(chunk);
438 if (chunk->transport)
439 transport->flight_size -= sctp_data_size(chunk);
441 /* sctpimpguide-05 Section 2.8.2
442 * M5) If a T3-rtx timer expires, the
443 * 'TSN.Missing.Report' of all affected TSNs is set
446 chunk->tsn_missing_report = 0;
448 /* If a chunk that is being used for RTT measurement
449 * has to be retransmitted, we cannot use this chunk
450 * anymore for RTT measurements. Reset rto_pending so
451 * that a new RTT measurement is started when a new
452 * data chunk is sent.
454 if (chunk->rtt_in_progress) {
455 chunk->rtt_in_progress = 0;
456 transport->rto_pending = 0;
459 /* Move the chunk to the retransmit queue. The chunks
460 * on the retransmit queue are always kept in order.
462 list_del_init(lchunk);
463 sctp_insert_list(&q->retransmit, lchunk);
467 SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
468 "cwnd: %d, ssthresh: %d, flight_size: %d, "
469 "pba: %d\n", __func__,
471 transport->cwnd, transport->ssthresh,
472 transport->flight_size,
473 transport->partial_bytes_acked);
477 /* Mark all the eligible packets on a transport for retransmission and force
480 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
481 sctp_retransmit_reason_t reason)
486 case SCTP_RTXR_T3_RTX:
487 SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
488 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
489 /* Update the retran path if the T3-rtx timer has expired for
490 * the current retran path.
492 if (transport == transport->asoc->peer.retran_path)
493 sctp_assoc_update_retran_path(transport->asoc);
494 transport->asoc->rtx_data_chunks +=
495 transport->asoc->unack_data;
497 case SCTP_RTXR_FAST_RTX:
498 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
499 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
502 case SCTP_RTXR_PMTUD:
503 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
505 case SCTP_RTXR_T1_RTX:
506 SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
507 transport->asoc->init_retries++;
513 sctp_retransmit_mark(q, transport, reason);
515 /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
516 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
517 * following the procedures outlined in C1 - C5.
519 if (reason == SCTP_RTXR_T3_RTX)
520 sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
522 /* Flush the queues only on timeout, since fast_rtx is only
523 * triggered during sack processing and the queue
524 * will be flushed at the end.
526 if (reason != SCTP_RTXR_FAST_RTX)
527 error = sctp_outq_flush(q, /* rtx_timeout */ 1);
530 q->asoc->base.sk->sk_err = -error;
534 * Transmit DATA chunks on the retransmit queue. Upon return from
535 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
536 * need to be transmitted by the caller.
537 * We assume that pkt->transport has already been set.
539 * The return value is a normal kernel error return value.
541 static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
542 int rtx_timeout, int *start_timer)
544 struct list_head *lqueue;
545 struct sctp_transport *transport = pkt->transport;
547 struct sctp_chunk *chunk, *chunk1;
548 struct sctp_association *asoc;
555 lqueue = &q->retransmit;
556 fast_rtx = q->fast_rtx;
558 /* This loop handles time-out retransmissions, fast retransmissions,
559 * and retransmissions due to opening of the window.
561 * RFC 2960 6.3.3 Handle T3-rtx Expiration
563 * E3) Determine how many of the earliest (i.e., lowest TSN)
564 * outstanding DATA chunks for the address for which the
565 * T3-rtx has expired will fit into a single packet, subject
566 * to the MTU constraint for the path corresponding to the
567 * destination transport address to which the retransmission
568 * is being sent (this may be different from the address for
569 * which the timer expires [see Section 6.4]). Call this value
570 * K. Bundle and retransmit those K DATA chunks in a single
571 * packet to the destination endpoint.
573 * [Just to be painfully clear, if we are retransmitting
574 * because a timeout just happened, we should send only ONE
575 * packet of retransmitted data.]
577 * For fast retransmissions we also send only ONE packet. However,
578 * if we are just flushing the queue due to open window, we'll
579 * try to send as much as possible.
581 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
583 /* Make sure that Gap Acked TSNs are not retransmitted. A
584 * simple approach is just to move such TSNs out of the
585 * way and into a 'transmitted' queue and skip to the
588 if (chunk->tsn_gap_acked) {
589 list_del(&chunk->transmitted_list);
590 list_add_tail(&chunk->transmitted_list,
591 &transport->transmitted);
595 /* If we are doing fast retransmit, ignore non-fast_retransmit
598 if (fast_rtx && !chunk->fast_retransmit)
602 /* Attempt to append this chunk to the packet. */
603 status = sctp_packet_append_chunk(pkt, chunk);
606 case SCTP_XMIT_PMTU_FULL:
607 if (!pkt->has_data && !pkt->has_cookie_echo) {
608 /* If this packet did not contain DATA then
609 * retransmission did not happen, so do it
610 * again. We'll ignore the error here since
611 * control chunks are already freed so there
612 * is nothing we can do.
614 sctp_packet_transmit(pkt);
618 /* Send this packet. */
619 error = sctp_packet_transmit(pkt);
621 /* If we are retransmitting, we should only
622 * send a single packet.
624 if (rtx_timeout || fast_rtx)
627 /* Bundle next chunk in the next round. */
630 case SCTP_XMIT_RWND_FULL:
631 /* Send this packet. */
632 error = sctp_packet_transmit(pkt);
634 /* Stop sending DATA as there is no more room
640 case SCTP_XMIT_NAGLE_DELAY:
641 /* Send this packet. */
642 error = sctp_packet_transmit(pkt);
644 /* Stop sending DATA because of Nagle delay. */
649 /* The append was successful, so add this chunk to
650 * the transmitted list.
652 list_del(&chunk->transmitted_list);
653 list_add_tail(&chunk->transmitted_list,
654 &transport->transmitted);
656 /* Mark the chunk as ineligible for fast retransmit
657 * after it is retransmitted.
659 if (chunk->fast_retransmit == SCTP_NEED_FRTX)
660 chunk->fast_retransmit = SCTP_DONT_FRTX;
666 /* Set the timer if there were no errors */
667 if (!error && !timer)
674 /* If we are here due to a retransmit timeout or a fast
675 * retransmit and if there are any chunks left in the retransmit
676 * queue that could not fit in the PMTU sized packet, they need
677 * to be marked as ineligible for a subsequent fast retransmit.
679 if (rtx_timeout || fast_rtx) {
680 list_for_each_entry(chunk1, lqueue, transmitted_list) {
681 if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
682 chunk1->fast_retransmit = SCTP_DONT_FRTX;
686 *start_timer = timer;
688 /* Clear fast retransmit hint */
695 /* Uncork the outqueue: flush any chunks that were queued up while the queue was corked. */
696 int sctp_outq_uncork(struct sctp_outq *q)
701 error = sctp_outq_flush(q, 0);
707 * Try to flush an outqueue.
709 * Description: Send everything in q which we legally can, subject to
710 * congestion limitations.
711 * Note: This function can be called from multiple contexts, so appropriate
712 * locking must be used. Today we use the sock lock to protect
715 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
717 struct sctp_packet *packet;
718 struct sctp_packet singleton;
719 struct sctp_association *asoc = q->asoc;
720 __u16 sport = asoc->base.bind_addr.port;
721 __u16 dport = asoc->peer.port;
722 __u32 vtag = asoc->peer.i.init_tag;
723 struct sctp_transport *transport = NULL;
724 struct sctp_transport *new_transport;
725 struct sctp_chunk *chunk, *tmp;
731 /* These transports have chunks to send. */
732 struct list_head transport_list;
733 struct list_head *ltransport;
735 INIT_LIST_HEAD(&transport_list);
741 * When bundling control chunks with DATA chunks, an
742 * endpoint MUST place control chunks first in the outbound
743 * SCTP packet. The transmitter MUST transmit DATA chunks
744 * within a SCTP packet in increasing order of TSN.
748 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
749 list_del_init(&chunk->list);
751 /* Pick the right transport to use. */
752 new_transport = chunk->transport;
754 if (!new_transport) {
756 * If we have a prior transport pointer, see if
757 * the destination address of the chunk
758 * matches the destination address of the
759 * current transport. If not a match, then
760 * try to look up the transport with a given
761 * destination address. We do this because
762 * after processing ASCONFs, we may have new
763 * transports created.
766 sctp_cmp_addr_exact(&chunk->dest,
768 new_transport = transport;
770 new_transport = sctp_assoc_lookup_paddr(asoc,
773 /* if we still don't have a new transport, then
774 * use the current active path.
777 new_transport = asoc->peer.active_path;
778 } else if ((new_transport->state == SCTP_INACTIVE) ||
779 (new_transport->state == SCTP_UNCONFIRMED)) {
780 /* If the chunk is Heartbeat or Heartbeat Ack,
781 * send it to chunk->transport, even if it's
784 * 3.3.6 Heartbeat Acknowledgement:
786 * A HEARTBEAT ACK is always sent to the source IP
787 * address of the IP datagram containing the
788 * HEARTBEAT chunk to which this ack is responding.
791 * ASCONF_ACKs also must be sent to the source.
793 if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
794 chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
795 chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
796 new_transport = asoc->peer.active_path;
799 /* Are we switching transports?
800 * Take care of transport locks.
802 if (new_transport != transport) {
803 transport = new_transport;
804 if (list_empty(&transport->send_ready)) {
805 list_add_tail(&transport->send_ready,
808 packet = &transport->packet;
809 sctp_packet_config(packet, vtag,
810 asoc->peer.ecn_capable);
813 switch (chunk->chunk_hdr->type) {
817 * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
818 * COMPLETE with any other chunks. [Send them immediately.]
821 case SCTP_CID_INIT_ACK:
822 case SCTP_CID_SHUTDOWN_COMPLETE:
823 sctp_packet_init(&singleton, transport, sport, dport);
824 sctp_packet_config(&singleton, vtag, 0);
825 sctp_packet_append_chunk(&singleton, chunk);
826 error = sctp_packet_transmit(&singleton);
832 if (sctp_test_T_bit(chunk)) {
833 packet->vtag = asoc->c.my_vtag;
835 /* The following chunks are "response" chunks, i.e.
836 * they are generated in response to something we
837 * received. If we are sending these, then we can
838 * send only 1 packet containing these chunks.
840 case SCTP_CID_HEARTBEAT_ACK:
841 case SCTP_CID_SHUTDOWN_ACK:
842 case SCTP_CID_COOKIE_ACK:
843 case SCTP_CID_COOKIE_ECHO:
845 case SCTP_CID_ECN_CWR:
846 case SCTP_CID_ASCONF_ACK:
851 case SCTP_CID_HEARTBEAT:
852 case SCTP_CID_SHUTDOWN:
853 case SCTP_CID_ECN_ECNE:
854 case SCTP_CID_ASCONF:
855 case SCTP_CID_FWD_TSN:
856 status = sctp_packet_transmit_chunk(packet, chunk,
858 if (status != SCTP_XMIT_OK) {
859 /* put the chunk back */
860 list_add(&chunk->list, &q->control_chunk_list);
861 } else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
862 /* PR-SCTP C5) If a FORWARD TSN is sent, the
863 * sender MUST assure that at least one T3-rtx
866 sctp_transport_reset_timers(transport);
871 /* We built a chunk with an illegal type! */
876 /* Is it OK to send data chunks? */
877 switch (asoc->state) {
878 case SCTP_STATE_COOKIE_ECHOED:
879 /* Only allow bundling when this packet has a COOKIE-ECHO
882 if (!packet || !packet->has_cookie_echo)
886 case SCTP_STATE_ESTABLISHED:
887 case SCTP_STATE_SHUTDOWN_PENDING:
888 case SCTP_STATE_SHUTDOWN_RECEIVED:
890 * RFC 2960 6.1 Transmission of DATA Chunks
892 * C) When the time comes for the sender to transmit,
893 * before sending new DATA chunks, the sender MUST
894 * first transmit any outstanding DATA chunks which
895 * are marked for retransmission (limited by the
898 if (!list_empty(&q->retransmit)) {
899 if (transport == asoc->peer.retran_path)
902 /* Switch transports & prepare the packet. */
904 transport = asoc->peer.retran_path;
906 if (list_empty(&transport->send_ready)) {
907 list_add_tail(&transport->send_ready,
911 packet = &transport->packet;
912 sctp_packet_config(packet, vtag,
913 asoc->peer.ecn_capable);
915 error = sctp_outq_flush_rtx(q, packet,
916 rtx_timeout, &start_timer);
919 sctp_transport_reset_timers(transport);
921 /* This can happen on COOKIE-ECHO resend. Only
922 * one chunk can get bundled with a COOKIE-ECHO.
924 if (packet->has_cookie_echo)
927 /* Don't send new data if there is still data
928 * waiting to retransmit.
930 if (!list_empty(&q->retransmit))
934 /* Apply Max.Burst limitation to the current transport in
935 * case it will be used for new data. We are going to
936 * reset it before we return, but we want to apply the limit
937 * to the currently queued data.
940 sctp_transport_burst_limited(transport);
942 /* Finally, transmit new packets. */
943 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
944 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
947 if (chunk->sinfo.sinfo_stream >=
948 asoc->c.sinit_num_ostreams) {
950 /* Mark as failed send. */
951 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
952 sctp_chunk_free(chunk);
956 /* Has this chunk expired? */
957 if (sctp_chunk_abandoned(chunk)) {
958 sctp_chunk_fail(chunk, 0);
959 sctp_chunk_free(chunk);
963 /* If there is a specified transport, use it.
964 * Otherwise, we want to use the active path.
966 new_transport = chunk->transport;
967 if (!new_transport ||
968 ((new_transport->state == SCTP_INACTIVE) ||
969 (new_transport->state == SCTP_UNCONFIRMED)))
970 new_transport = asoc->peer.active_path;
972 /* Change packets if necessary. */
973 if (new_transport != transport) {
974 transport = new_transport;
976 /* Schedule to have this transport's
979 if (list_empty(&transport->send_ready)) {
980 list_add_tail(&transport->send_ready,
984 packet = &transport->packet;
985 sctp_packet_config(packet, vtag,
986 asoc->peer.ecn_capable);
987 /* We've switched transports, so apply the
988 * Burst limit to the new transport.
990 sctp_transport_burst_limited(transport);
993 SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
995 chunk && chunk->chunk_hdr ?
996 sctp_cname(SCTP_ST_CHUNK(
997 chunk->chunk_hdr->type))
1000 SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
1001 "%p skb->users %d.\n",
1002 ntohl(chunk->subh.data_hdr->tsn),
1003 chunk->skb ? chunk->skb->head : NULL,
1005 atomic_read(&chunk->skb->users) : -1);
1007 /* Add the chunk to the packet. */
1008 status = sctp_packet_transmit_chunk(packet, chunk, 0);
1011 case SCTP_XMIT_PMTU_FULL:
1012 case SCTP_XMIT_RWND_FULL:
1013 case SCTP_XMIT_NAGLE_DELAY:
1014 /* We could not append this chunk, so put
1015 * the chunk back on the output queue.
1017 SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
1018 "not transmit TSN: 0x%x, status: %d\n",
1019 ntohl(chunk->subh.data_hdr->tsn),
1021 sctp_outq_head_data(q, chunk);
1022 goto sctp_flush_out;
1026 /* When the sender is in the SHUTDOWN-PENDING state,
1027 * it MAY set the I-bit in the DATA
1030 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1031 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1039 /* BUG: We assume that the sctp_packet_transmit()
1040 * call below will succeed all the time and add the
1041 * chunk to the transmitted list and restart the
1043 * It is possible that the call can fail under OOM
1046 * Is this really a problem? Won't this behave
1049 list_add_tail(&chunk->transmitted_list,
1050 &transport->transmitted);
1052 sctp_transport_reset_timers(transport);
1056 /* Only let one DATA chunk get bundled with a
1057 * COOKIE-ECHO chunk.
1059 if (packet->has_cookie_echo)
1060 goto sctp_flush_out;
1071 /* Before returning, examine all the transports touched in
1072 * this call. Right now, we bluntly force clear all the
1073 * transports. Things might change after we implement Nagle.
1074 * But such an examination is still required.
1078 while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) {
1079 struct sctp_transport *t = list_entry(ltransport,
1080 struct sctp_transport,
1082 packet = &t->packet;
1083 if (!sctp_packet_empty(packet))
1084 error = sctp_packet_transmit(packet);
1086 /* Clear the burst limited state, if any */
1087 sctp_transport_burst_reset(t);
1093 /* Update unack_data based on the incoming SACK chunk */
1094 static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1095 struct sctp_sackhdr *sack)
1097 sctp_sack_variable_t *frags;
1101 unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
1103 frags = sack->variable;
1104 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
1105 unack_data -= ((ntohs(frags[i].gab.end) -
1106 ntohs(frags[i].gab.start) + 1));
1109 assoc->unack_data = unack_data;
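/*
 * A worked example with assumed numbers: if next_tsn is 110 and
 * ctsn_ack_point is 99, then 110 - 99 - 1 = 10 TSNs (100 through 109)
 * are not covered by the cumulative ack.  One Gap Ack Block with start 3
 * and end 5 additionally acknowledges TSNs 102..104, i.e. 5 - 3 + 1 = 3
 * chunks, leaving unack_data at 7.
 */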
1112 /* This is where we REALLY process a SACK.
1114 * Process the SACK against the outqueue. Mostly, this just frees
1115 * things off the transmitted queue.
1117 int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1119 struct sctp_association *asoc = q->asoc;
1120 struct sctp_transport *transport;
1121 struct sctp_chunk *tchunk = NULL;
1122 struct list_head *lchunk, *transport_list, *temp;
1123 sctp_sack_variable_t *frags = sack->variable;
1124 __u32 sack_ctsn, ctsn, tsn;
1125 __u32 highest_tsn, highest_new_tsn;
1127 unsigned outstanding;
1128 struct sctp_transport *primary = asoc->peer.primary_path;
1129 int count_of_newacks = 0;
1133 /* Grab the association's destination address list. */
1134 transport_list = &asoc->peer.transport_addr_list;
1136 sack_ctsn = ntohl(sack->cum_tsn_ack);
1137 gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
1139 * SFR-CACC algorithm:
1140 * On receipt of a SACK the sender SHOULD execute the
1141 * following statements.
1143 * 1) If the cumulative ack in the SACK passes next tsn_at_change
1144 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
1145 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
1147 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
1148 * is set the receiver of the SACK MUST take the following actions:
1150 * A) Initialize the cacc_saw_newack to 0 for all destination
1153 * Only bother if changeover_active is set. Otherwise, this is
1154 * totally suboptimal to do on every SACK.
1156 if (primary->cacc.changeover_active) {
1157 u8 clear_cycling = 0;
1159 if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
1160 primary->cacc.changeover_active = 0;
1164 if (clear_cycling || gap_ack_blocks) {
1165 list_for_each_entry(transport, transport_list,
1168 transport->cacc.cycling_changeover = 0;
1170 transport->cacc.cacc_saw_newack = 0;
1175 /* Get the highest TSN in the sack. */
1176 highest_tsn = sack_ctsn;
1178 highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
1180 if (TSN_lt(asoc->highest_sacked, highest_tsn))
1181 asoc->highest_sacked = highest_tsn;
1183 highest_new_tsn = sack_ctsn;
1185 /* Run through the retransmit queue. Credit bytes received
1186 * and free those chunks that we can.
1188 sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn);
1190 /* Run through the transmitted queue.
1191 * Credit bytes received and free those chunks which we can.
1193 * This is a MASSIVE candidate for optimization.
1195 list_for_each_entry(transport, transport_list, transports) {
1196 sctp_check_transmitted(q, &transport->transmitted,
1197 transport, sack, &highest_new_tsn);
1199 * SFR-CACC algorithm:
1200 * C) Let count_of_newacks be the number of
1201 * destinations for which cacc_saw_newack is set.
1203 if (transport->cacc.cacc_saw_newack)
1204 count_of_newacks++;
1207 /* Move the Cumulative TSN Ack Point if appropriate. */
1208 if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
1209 asoc->ctsn_ack_point = sack_ctsn;
1213 if (gap_ack_blocks) {
1215 if (asoc->fast_recovery && accum_moved)
1216 highest_new_tsn = highest_tsn;
1218 list_for_each_entry(transport, transport_list, transports)
1219 sctp_mark_missing(q, &transport->transmitted, transport,
1220 highest_new_tsn, count_of_newacks);
1223 /* Update unack_data field in the assoc. */
1224 sctp_sack_update_unack_data(asoc, sack);
1226 ctsn = asoc->ctsn_ack_point;
1228 /* Throw away stuff rotting on the sack queue. */
1229 list_for_each_safe(lchunk, temp, &q->sacked) {
1230 tchunk = list_entry(lchunk, struct sctp_chunk,
1232 tsn = ntohl(tchunk->subh.data_hdr->tsn);
1233 if (TSN_lte(tsn, ctsn)) {
1234 list_del_init(&tchunk->transmitted_list);
1235 sctp_chunk_free(tchunk);
1239 /* ii) Set rwnd equal to the newly received a_rwnd minus the
1240 * number of bytes still outstanding after processing the
1241 * Cumulative TSN Ack and the Gap Ack Blocks.
1244 sack_a_rwnd = ntohl(sack->a_rwnd);
1245 outstanding = q->outstanding_bytes;
1247 if (outstanding < sack_a_rwnd)
1248 sack_a_rwnd -= outstanding;
1252 asoc->peer.rwnd = sack_a_rwnd;
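/*
 * A worked example with assumed numbers: an advertised a_rwnd of 65535
 * with 1500 bytes still outstanding after this SACK leaves an effective
 * peer rwnd of 64035.  When the outstanding bytes meet or exceed a_rwnd,
 * the peer rwnd is taken as zero rather than being allowed to underflow.
 */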
1254 sctp_generate_fwdtsn(q, sack_ctsn);
1256 SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
1257 __func__, sack_ctsn);
1258 SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
1259 "%p is 0x%x. Adv peer ack point: 0x%x\n",
1260 __func__, asoc, ctsn, asoc->adv_peer_ack_point);
1262 /* See if all chunks are acked.
1263 * Make sure the empty queue handler will get run later.
1265 q->empty = (list_empty(&q->out_chunk_list) &&
1266 list_empty(&q->retransmit));
1270 list_for_each_entry(transport, transport_list, transports) {
1271 q->empty = q->empty && list_empty(&transport->transmitted);
1276 SCTP_DEBUG_PRINTK("sack queue is empty.\n");
1281 /* Is the outqueue empty? */
1282 int sctp_outq_is_empty(const struct sctp_outq *q)
1287 /********************************************************************
1288 * 2nd Level Abstractions
1289 ********************************************************************/
1291 /* Go through a transport's transmitted list or the association's retransmit
1292 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
1293 * The retransmit list will not have an associated transport.
1295 * I added coherent debug information output. --xguo
1297 * Instead of printing 'sacked' or 'kept' for each TSN on the
1298 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
1299 * KEPT TSN6-TSN7, etc.
1301 static void sctp_check_transmitted(struct sctp_outq *q,
1302 struct list_head *transmitted_queue,
1303 struct sctp_transport *transport,
1304 struct sctp_sackhdr *sack,
1305 __u32 *highest_new_tsn_in_sack)
1307 struct list_head *lchunk;
1308 struct sctp_chunk *tchunk;
1309 struct list_head tlist;
1313 __u8 restart_timer = 0;
1314 int bytes_acked = 0;
1315 int migrate_bytes = 0;
1317 /* These state variables are for coherent debug output. --xguo */
1320 __u32 dbg_ack_tsn = 0; /* An ACKed TSN range starts here... */
1321 __u32 dbg_last_ack_tsn = 0; /* ...and finishes here. */
1322 __u32 dbg_kept_tsn = 0; /* An un-ACKed range starts here... */
1323 __u32 dbg_last_kept_tsn = 0; /* ...and finishes here. */
1325 /* 0 : The last TSN was ACKed.
1326 * 1 : The last TSN was NOT ACKed (i.e. KEPT).
1327 * -1: We need to initialize.
1329 int dbg_prt_state = -1;
1330 #endif /* SCTP_DEBUG */
1332 sack_ctsn = ntohl(sack->cum_tsn_ack);
1334 INIT_LIST_HEAD(&tlist);
1336 /* The while loop will skip empty transmitted queues. */
1337 while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
1338 tchunk = list_entry(lchunk, struct sctp_chunk,
1341 if (sctp_chunk_abandoned(tchunk)) {
1342 /* Move the chunk to abandoned list. */
1343 sctp_insert_list(&q->abandoned, lchunk);
1345 /* If this chunk has not been acked, stop
1346 * considering it as 'outstanding'.
1348 if (!tchunk->tsn_gap_acked) {
1349 if (tchunk->transport)
1350 tchunk->transport->flight_size -=
1351 sctp_data_size(tchunk);
1352 q->outstanding_bytes -= sctp_data_size(tchunk);
1357 tsn = ntohl(tchunk->subh.data_hdr->tsn);
1358 if (sctp_acked(sack, tsn)) {
1359 /* If this queue is the retransmit queue, the
1360 * retransmit timer has already reclaimed
1361 * the outstanding bytes for this chunk, so only
1362 * count bytes associated with a transport.
1365 /* If this chunk is being used for RTT
1366 * measurement, calculate the RTT and update
1367 * the RTO using this value.
1369 * 6.3.1 C5) Karn's algorithm: RTT measurements
1370 * MUST NOT be made using packets that were
1371 * retransmitted (and thus for which it is
1372 * ambiguous whether the reply was for the
1373 * first instance of the packet or a later
1376 if (!tchunk->tsn_gap_acked &&
1377 tchunk->rtt_in_progress) {
1378 tchunk->rtt_in_progress = 0;
1379 rtt = jiffies - tchunk->sent_at;
1380 sctp_transport_update_rto(transport,
1385 /* If the chunk hasn't been marked as ACKED,
1386 * mark it and account bytes_acked if the
1387 * chunk had a valid transport (it will not
1388 * have a transport if ASCONF had deleted it
1389 * while DATA was outstanding).
1391 if (!tchunk->tsn_gap_acked) {
1392 tchunk->tsn_gap_acked = 1;
1393 *highest_new_tsn_in_sack = tsn;
1394 bytes_acked += sctp_data_size(tchunk);
1395 if (!tchunk->transport)
1396 migrate_bytes += sctp_data_size(tchunk);
1399 if (TSN_lte(tsn, sack_ctsn)) {
1400 /* RFC 2960 6.3.2 Retransmission Timer Rules
1402 * R3) Whenever a SACK is received
1403 * that acknowledges the DATA chunk
1404 * with the earliest outstanding TSN
1405 * for that address, restart T3-rtx
1406 * timer for that address with its
1411 if (!tchunk->tsn_gap_acked) {
1413 * SFR-CACC algorithm:
1414 * 2) If the SACK contains gap acks
1415 * and the flag CHANGEOVER_ACTIVE is
1416 * set the receiver of the SACK MUST
1417 * take the following action:
1419 * B) For each TSN t being acked that
1420 * has not been acked in any SACK so
1421 * far, set cacc_saw_newack to 1 for
1422 * the destination that the TSN was
1426 sack->num_gap_ack_blocks &&
1427 q->asoc->peer.primary_path->cacc.
1429 transport->cacc.cacc_saw_newack
1433 list_add_tail(&tchunk->transmitted_list,
1436 /* RFC2960 7.2.4, sctpimpguide-05 2.8.2
1437 * M2) Each time a SACK arrives reporting
1438 * 'Stray DATA chunk(s)' record the highest TSN
1439 * reported as newly acknowledged, call this
1440 * value 'HighestTSNinSack'. A newly
1441 * acknowledged DATA chunk is one not
1442 * previously acknowledged in a SACK.
1444 * When the SCTP sender of data receives a SACK
1445 * chunk that acknowledges, for the first time,
1446 * the receipt of a DATA chunk, all the still
1447 * unacknowledged DATA chunks whose TSN is
1448 * older than that newly acknowledged DATA
1449 * chunk, are qualified as 'Stray DATA chunks'.
1451 list_add_tail(lchunk, &tlist);
1455 switch (dbg_prt_state) {
1456 case 0: /* last TSN was ACKed */
1457 if (dbg_last_ack_tsn + 1 == tsn) {
1458 /* This TSN belongs to the
1459 * current ACK range.
1464 if (dbg_last_ack_tsn != dbg_ack_tsn) {
1465 /* Display the end of the
1468 SCTP_DEBUG_PRINTK_CONT("-%08x",
1472 /* Start a new range. */
1473 SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
1477 case 1: /* The last TSN was NOT ACKed. */
1478 if (dbg_last_kept_tsn != dbg_kept_tsn) {
1479 /* Display the end of current range. */
1480 SCTP_DEBUG_PRINTK_CONT("-%08x",
1484 SCTP_DEBUG_PRINTK_CONT("\n");
1486 /* FALL THROUGH... */
1488 /* This is the first-ever TSN we examined. */
1489 /* Start a new range of ACK-ed TSNs. */
1490 SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
1495 dbg_last_ack_tsn = tsn;
1496 #endif /* SCTP_DEBUG */
1499 if (tchunk->tsn_gap_acked) {
1500 SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
1504 tchunk->tsn_gap_acked = 0;
1506 if (tchunk->transport)
1507 bytes_acked -= sctp_data_size(tchunk);
1509 /* RFC 2960 6.3.2 Retransmission Timer Rules
1511 * R4) Whenever a SACK is received missing a
1512 * TSN that was previously acknowledged via a
1513 * Gap Ack Block, start T3-rtx for the
1514 * destination address to which the DATA
1515 * chunk was originally
1516 * transmitted if it is not already running.
1521 list_add_tail(lchunk, &tlist);
1524 /* See the above comments on ACK-ed TSNs. */
1525 switch (dbg_prt_state) {
1527 if (dbg_last_kept_tsn + 1 == tsn)
1530 if (dbg_last_kept_tsn != dbg_kept_tsn)
1531 SCTP_DEBUG_PRINTK_CONT("-%08x",
1534 SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
1539 if (dbg_last_ack_tsn != dbg_ack_tsn)
1540 SCTP_DEBUG_PRINTK_CONT("-%08x",
1542 SCTP_DEBUG_PRINTK_CONT("\n");
1544 /* FALL THROUGH... */
1546 SCTP_DEBUG_PRINTK("KEPT: %08x",tsn);
1551 dbg_last_kept_tsn = tsn;
1552 #endif /* SCTP_DEBUG */
1557 /* Finish off the last range, displaying its ending TSN. */
1558 switch (dbg_prt_state) {
1560 if (dbg_last_ack_tsn != dbg_ack_tsn) {
1561 SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
1563 SCTP_DEBUG_PRINTK_CONT("\n");
1568 if (dbg_last_kept_tsn != dbg_kept_tsn) {
1569 SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
1571 SCTP_DEBUG_PRINTK_CONT("\n");
1574 #endif /* SCTP_DEBUG */
1577 /* We may have counted DATA that was migrated
1578 * to this transport due to DEL-IP operation.
1579 * Subtract those bytes, since they were never
1580 * sent on this transport and shouldn't be
1581 * credited to this transport.
1583 bytes_acked -= migrate_bytes;
1585 /* 8.2. When an outstanding TSN is acknowledged,
1586 * the endpoint shall clear the error counter of
1587 * the destination transport address to which the
1588 * DATA chunk was last sent.
1589 * The association's overall error counter is
1592 transport->error_count = 0;
1593 transport->asoc->overall_error_count = 0;
1595 /* Mark the destination transport address as
1596 * active if it is not so marked.
1598 if ((transport->state == SCTP_INACTIVE) ||
1599 (transport->state == SCTP_UNCONFIRMED)) {
1600 sctp_assoc_control_transport(
1604 SCTP_RECEIVED_SACK);
1607 sctp_transport_raise_cwnd(transport, sack_ctsn,
1610 transport->flight_size -= bytes_acked;
1611 if (transport->flight_size == 0)
1612 transport->partial_bytes_acked = 0;
1613 q->outstanding_bytes -= bytes_acked + migrate_bytes;
1615 /* RFC 2960 6.1, sctpimpguide-06 2.15.2
1616 * When a sender is doing zero window probing, it
1617 * should not timeout the association if it continues
1618 * to receive new packets from the receiver. The
1619 * reason is that the receiver MAY keep its window
1620 * closed for an indefinite time.
1621 * A sender is doing zero window probing when the
1622 * receiver's advertised window is zero, and there is
1623 * only one data chunk in flight to the receiver.
1625 if (!q->asoc->peer.rwnd &&
1626 !list_empty(&tlist) &&
1627 (sack_ctsn+2 == q->asoc->next_tsn)) {
1628 SCTP_DEBUG_PRINTK("%s: SACK received for zero "
1629 "window probe: %u\n",
1630 __func__, sack_ctsn);
1631 q->asoc->overall_error_count = 0;
1632 transport->error_count = 0;
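/*
 * Why sack_ctsn + 2 == next_tsn identifies a single chunk in flight
 * (illustrated with assumed values): next_tsn is the next TSN that would
 * be assigned, so with next_tsn == 105 the highest TSN actually sent is
 * 104.  A cumulative ack of 103 then leaves exactly one TSN, 104,
 * outstanding, which is the zero window probe itself.
 */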
1636 /* RFC 2960 6.3.2 Retransmission Timer Rules
1638 * R2) Whenever all outstanding data sent to an address have
1639 * been acknowledged, turn off the T3-rtx timer of that
1642 if (!transport->flight_size) {
1643 if (timer_pending(&transport->T3_rtx_timer) &&
1644 del_timer(&transport->T3_rtx_timer)) {
1645 sctp_transport_put(transport);
1647 } else if (restart_timer) {
1648 if (!mod_timer(&transport->T3_rtx_timer,
1649 jiffies + transport->rto))
1650 sctp_transport_hold(transport);
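/*
 * The timer handling above also keeps the transport reference count
 * balanced: del_timer() returns nonzero only when it deactivated a
 * pending timer, so the reference that timer held is dropped with
 * sctp_transport_put(); mod_timer() returns 0 when the timer was not
 * already pending, in which case the freshly armed T3-rtx timer takes
 * its own reference via sctp_transport_hold().
 */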
1654 list_splice(&tlist, transmitted_queue);
1657 /* Mark chunks as missing; consequently, they may get retransmitted. */
1658 static void sctp_mark_missing(struct sctp_outq *q,
1659 struct list_head *transmitted_queue,
1660 struct sctp_transport *transport,
1661 __u32 highest_new_tsn_in_sack,
1662 int count_of_newacks)
1664 struct sctp_chunk *chunk;
1666 char do_fast_retransmit = 0;
1667 struct sctp_association *asoc = q->asoc;
1668 struct sctp_transport *primary = asoc->peer.primary_path;
1670 list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1672 tsn = ntohl(chunk->subh.data_hdr->tsn);
1674 /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
1675 * 'Unacknowledged TSN's', if the TSN number of an
1676 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
1677 * value, increment the 'TSN.Missing.Report' count on that
1678 * chunk if it has NOT been fast retransmitted or marked for
1679 * fast retransmit already.
1681 if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
1682 !chunk->tsn_gap_acked &&
1683 TSN_lt(tsn, highest_new_tsn_in_sack)) {
1685 /* SFR-CACC may require us to skip marking
1686 * this chunk as missing.
1688 if (!transport || !sctp_cacc_skip(primary, transport,
1689 count_of_newacks, tsn)) {
1690 chunk->tsn_missing_report++;
1693 "%s: TSN 0x%x missing counter: %d\n",
1695 chunk->tsn_missing_report);
1699 * M4) If any DATA chunk is found to have a
1700 * 'TSN.Missing.Report'
1701 * value larger than or equal to 3, mark that chunk for
1702 * retransmission and start the fast retransmit procedure.
1705 if (chunk->tsn_missing_report >= 3) {
1706 chunk->fast_retransmit = SCTP_NEED_FRTX;
1707 do_fast_retransmit = 1;
1712 if (do_fast_retransmit)
1713 sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
1715 SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
1716 "ssthresh: %d, flight_size: %d, pba: %d\n",
1717 __func__, transport, transport->cwnd,
1718 transport->ssthresh, transport->flight_size,
1719 transport->partial_bytes_acked);
1723 /* Is the given TSN acked by this packet? */
1724 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
1727 sctp_sack_variable_t *frags;
1729 __u32 ctsn = ntohl(sack->cum_tsn_ack);
1731 if (TSN_lte(tsn, ctsn))
1734 /* 3.3.4 Selective Acknowledgement (SACK) (3):
1737 * These fields contain the Gap Ack Blocks. They are repeated
1738 * for each Gap Ack Block up to the number of Gap Ack Blocks
1739 * defined in the Number of Gap Ack Blocks field. All DATA
1740 * chunks with TSNs greater than or equal to (Cumulative TSN
1741 * Ack + Gap Ack Block Start) and less than or equal to
1742 * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
1743 * Block are assumed to have been received correctly.
1746 frags = sack->variable;
1748 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
1749 if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
1750 TSN_lte(gap, ntohs(frags[i].gab.end)))
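/*
 * A worked example with assumed values: with a Cumulative TSN Ack of 100
 * and a single Gap Ack Block of start 2, end 3, the block covers TSNs
 * 102 and 103.  For tsn == 102 the offset from the cumulative ack is 2,
 * which falls inside [2, 3], so the TSN counts as acked; tsn == 101
 * (offset 1) is not covered by any block and stays unacknowledged.
 */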
1759 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
1760 int nskips, __be16 stream)
1764 for (i = 0; i < nskips; i++) {
1765 if (skiplist[i].stream == stream)
1771 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */
1772 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1774 struct sctp_association *asoc = q->asoc;
1775 struct sctp_chunk *ftsn_chunk = NULL;
1776 struct sctp_fwdtsn_skip ftsn_skip_arr[10];
1780 struct sctp_chunk *chunk;
1781 struct list_head *lchunk, *temp;
1783 if (!asoc->peer.prsctp_capable)
1786 /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
1789 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
1790 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
1792 if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1793 asoc->adv_peer_ack_point = ctsn;
1795 /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
1796 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
1797 * the chunk next in the out-queue space is marked as "abandoned" as
1798 * shown in the following example:
1800 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
1801 * and the Advanced.Peer.Ack.Point is updated to this value:
1803 * out-queue at the end of ==> out-queue after Adv.Ack.Point
1804 * normal SACK processing local advancement
1806 * Adv.Ack.Pt-> 102 acked 102 acked
1807 * 103 abandoned 103 abandoned
1808 * 104 abandoned Adv.Ack.P-> 104 abandoned
1810 * 106 acked 106 acked
1813 * In this example, the data sender successfully advanced the
1814 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
1816 list_for_each_safe(lchunk, temp, &q->abandoned) {
1817 chunk = list_entry(lchunk, struct sctp_chunk,
1819 tsn = ntohl(chunk->subh.data_hdr->tsn);
1821 /* Remove any chunks in the abandoned queue that are acked by
1824 if (TSN_lte(tsn, ctsn)) {
1825 list_del_init(lchunk);
1826 sctp_chunk_free(chunk);
1828 if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
1829 asoc->adv_peer_ack_point = tsn;
1830 if (chunk->chunk_hdr->flags &
1831 SCTP_DATA_UNORDERED)
1833 skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
1835 chunk->subh.data_hdr->stream);
1836 ftsn_skip_arr[skip_pos].stream =
1837 chunk->subh.data_hdr->stream;
1838 ftsn_skip_arr[skip_pos].ssn =
1839 chunk->subh.data_hdr->ssn;
1840 if (skip_pos == nskips)
1849 /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
1850 * is greater than the Cumulative TSN ACK carried in the received
1851 * SACK, the data sender MUST send the data receiver a FORWARD TSN
1852 * chunk containing the latest value of the
1853 * "Advanced.Peer.Ack.Point".
1855 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
1856 * list each stream and sequence number in the forwarded TSN. This
1857 * information will enable the receiver to easily find any
1858 * stranded TSN's waiting on stream reorder queues. Each stream
1859 * SHOULD only be reported once; this means that if multiple
1860 * abandoned messages occur in the same stream then only the
1861 * highest abandoned stream sequence number is reported. If the
1862 * total size of the FORWARD TSN does NOT fit in a single MTU then
1863 * the sender of the FORWARD TSN SHOULD lower the
1864 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
1867 if (asoc->adv_peer_ack_point > ctsn)
1868 ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
1869 nskips, &ftsn_skip_arr[0]);
1872 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1873 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);