/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	struct sctp_stream_out_ext *oute;
	__u16 stream;

	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;

	stream = sctp_chunk_stream_no(ch);
	oute = q->asoc->stream.out[stream].ext;
	list_add(&ch->stream_list, &oute->outq);
}
/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	return q->sched->dequeue(q);
}
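
/* Note that dequeueing goes through the configured stream scheduler
 * (q->sched), so the chunk returned is the scheduler's pick for the
 * next stream to service; it is not necessarily the head of
 * out_chunk_list.
 */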
/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	struct sctp_stream_out_ext *oute;
	__u16 stream;

	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;

	stream = sctp_chunk_stream_no(ch);
	oute = q->asoc->stream.out[stream].ext;
	list_add_tail(&ch->stream_list, &oute->outq);
}
/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}
/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}
/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}
/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}
/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
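/* Worked example (following directly from the helpers above): with
 * CHANGEOVER_ACTIVE set and CYCLING_CHANGEOVER clear, a SACK that set
 * cacc_saw_newack on two destinations (count_of_newacks == 2) makes
 * step D suppress the missing report for any TSN sent to a
 * non-primary path, while TSNs sent to the primary still have their
 * report counts incremented.
 */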
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	memset(q, 0, sizeof(struct sctp_outq));

	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);
	sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
}
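
/* The queue starts out with the first-come-first-served scheduler
 * (SCTP_SS_FCFS) above; a different stream scheduler may be selected
 * later (typically via the SCTP_STREAM_SCHEDULER socket option,
 * assuming this kernel exposes it).
 */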
/* Free the outqueue structure and any related pending chunks.
 */
static void __sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			    transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed. */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
		sctp_sched_dequeue_done(q, chunk);

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}
/* Free the outqueue structure and any related pending chunks.  */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);
}
/* Put a new chunk in an sctp_outq.  */
void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
{
	struct net *net = sock_net(q->asoc->base.sk);

	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
		 chunk && chunk->chunk_hdr ?
		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
		 "illegal chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s]\n",
			 __func__, q, chunk, chunk && chunk->chunk_hdr ?
			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
			 "illegal chunk");

		sctp_outq_tail_data(q, chunk);
		if (chunk->asoc->peer.prsctp_capable &&
		    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
			chunk->asoc->sent_cnt_removable++;
		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
			SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
		else
			SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (!q->cork)
		sctp_outq_flush(q, 0, gfp);
}
/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}
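
/* For example, inserting TSN 103 into a list holding 101, 102, 104
 * lands it just before 104.  TSN_lt() uses serial number arithmetic,
 * so the ordering also holds across TSN wraparound.
 */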
static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
				  struct sctp_sndrcvinfo *sinfo,
				  struct list_head *queue, int msg_len)
{
	struct sctp_chunk *chk, *temp;

	list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
		struct sctp_stream_out *streamout;

		if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
		    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
			continue;

		list_del_init(&chk->transmitted_list);
		sctp_insert_list(&asoc->outqueue.abandoned,
				 &chk->transmitted_list);

		streamout = &asoc->stream.out[chk->sinfo.sinfo_stream];
		asoc->sent_cnt_removable--;
		asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
		streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;

		if (queue != &asoc->outqueue.retransmit &&
		    !chk->tsn_gap_acked) {
			if (chk->transport)
				chk->transport->flight_size -=
						sctp_data_size(chk);
			asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
		}

		msg_len -= SCTP_DATA_SNDSIZE(chk) +
			   sizeof(struct sk_buff) +
			   sizeof(struct sctp_chunk);
		if (msg_len <= 0)
			break;
	}

	return msg_len;
}
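
/* This helper and sctp_prsctp_prune_unsent() below both credit back
 * SCTP_DATA_SNDSIZE(chk) + sizeof(struct sk_buff) +
 * sizeof(struct sctp_chunk) against msg_len for each abandoned chunk;
 * this mirrors the per-chunk charge made against the socket send
 * buffer when the chunk was queued, so pruning stops once enough
 * buffer space has been reclaimed for the new message.
 */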
static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
				    struct sctp_sndrcvinfo *sinfo, int msg_len)
{
	struct sctp_outq *q = &asoc->outqueue;
	struct sctp_chunk *chk, *temp;

	q->sched->unsched_all(&asoc->stream);

	list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
		if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
		    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
			continue;

		sctp_sched_dequeue_common(q, chk);
		asoc->sent_cnt_removable--;
		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
		if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
			struct sctp_stream_out *streamout =
				&asoc->stream.out[chk->sinfo.sinfo_stream];

			streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
		}

		msg_len -= SCTP_DATA_SNDSIZE(chk) +
			   sizeof(struct sk_buff) +
			   sizeof(struct sctp_chunk);
		sctp_chunk_free(chk);
		if (msg_len <= 0)
			break;
	}

	q->sched->sched_all(&asoc->stream);

	return msg_len;
}
/* Abandon the chunks according to their priorities */
void sctp_prsctp_prune(struct sctp_association *asoc,
		       struct sctp_sndrcvinfo *sinfo, int msg_len)
{
	struct sctp_transport *transport;

	if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
		return;

	msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
					 &asoc->outqueue.retransmit,
					 msg_len);
	if (msg_len <= 0)
		return;

	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {
		msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
						 &transport->transmitted,
						 msg_len);
		if (msg_len <= 0)
			return;
	}

	sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
}
/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted.
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += sctp_data_size(chunk);
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX &&
		     (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
		 transport->cwnd, transport->ssthresh, transport->flight_size,
		 transport->partial_bytes_acked);
}
/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     enum sctp_retransmit_reason reason)
{
	struct net *net = sock_net(q->asoc->base.sk);

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	default:
		BUG();
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
}
/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct sctp_transport *transport = pkt->transport;
	struct sctp_chunk *chunk, *chunk1;
	struct list_head *lqueue;
	enum sctp_xmit status;
	int error = 0;
	int timer = 0;
	int done = 0;
	int fast_rtx;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]).  Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore non-fast_retransmit
		 * chunks
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt, GFP_ATOMIC);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt, GFP_ATOMIC);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt, GFP_ATOMIC);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt, GFP_ATOMIC);

			/* Stop sending DATA because of nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	q->fast_rtx = 0;

	return error;
}
/* Cork the outqueue so queued chunks are really queued. */
void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
{
	if (q->cork)
		q->cork = 0;

	sctp_outq_flush(q, 0, gfp);
}
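
/* While q->cork is set, sctp_outq_tail() only queues chunks;
 * sctp_outq_uncork() clears the flag and flushes once, so everything
 * queued in the meantime can be bundled into as few packets as
 * possible.
 */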
/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	enum sctp_xmit status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */
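	/* Consistent with the excerpt above, the loop below drains the
	 * control chunk list first; DATA is only considered in a second
	 * pass further down.
	 */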
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		/* RFC 5061, 5.3
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED) ||
			   (new_transport->state == SCTP_PF)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton, gfp);
			if (error < 0) {
				asoc->base.sk->sk_err = -error;
				return;
			}
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
		case SCTP_CID_RECONF:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet, gfp);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
				break;
			}

			asoc->stats.octrlchunks++;
			/* PR-SCTP C5) If a FORWARD TSN is sent, the
			 * sender MUST assure that at least one T3-rtx
			 * timer is running.
			 */
			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
				sctp_transport_reset_t3_rtx(transport);
				transport->last_time_sent = jiffies;
			}

			if (chunk == asoc->strreset_chunk)
				sctp_transport_reset_reconf_timer(transport);

			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}
	if (q->asoc->src_out_of_asoc_ok)
		goto sctp_flush_out;

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
				goto sctp_flush_out;
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */
			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);
			if (error < 0)
				asoc->base.sk->sk_err = -error;

			if (start_timer) {
				sctp_transport_reset_t3_rtx(transport);
				transport->last_time_sent = jiffies;
			}

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);
		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			__u32 sid = ntohs(chunk->subh.data_hdr->stream);

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_sched_dequeue_done(q, chunk);
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED) ||
			     (new_transport->state == SCTP_PF)))
				new_transport = asoc->peer.active_path;
			if (new_transport->state == SCTP_UNCONFIRMED) {
				WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
				sctp_sched_dequeue_done(q, chunk);
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
				sctp_transport_burst_limited(transport);
			}

			pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
				 "skb->users:%d\n",
				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
				 refcount_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
					 __func__, ntohl(chunk->subh.data_hdr->tsn),
					 status);

				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;

			case SCTP_XMIT_OK:
				/* The sender is in the SHUTDOWN-PENDING state,
				 * The sender MAY set the I-bit in the DATA
				 * chunk header.
				 */
				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
					asoc->stats.ouodchunks++;
				else
					asoc->stats.oodchunks++;

				/* Only now it's safe to consider this
				 * chunk as sent, sched-wise.
				 */
				sctp_sched_dequeue_done(q, chunk);

				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_t3_rtx(transport);
			transport->last_time_sent = jiffies;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing.  */
		break;
	}

sctp_flush_out:
	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet)) {
			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				asoc->base.sk->sk_err = -error;
		}

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}
}
/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	union sctp_sack_variable *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}
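
/* For example, with next_tsn == 110 and ctsn_ack_point == 99 there
 * are 10 TSNs (100..109) unaccounted for; a single gap ack block of
 * {start=3, end=5} covers three of them (TSNs 102..104), leaving
 * unack_data == 7.
 */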
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	union sctp_sack_variable *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					    transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;
	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {
		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			if (asoc->peer.prsctp_capable &&
			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
				asoc->sent_cnt_removable--;
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */
	sack_a_rwnd = ntohl(sack->a_rwnd);
	asoc->peer.zero_window_announced = !sack_a_rwnd;
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	sctp_generate_fwdtsn(q, sack_ctsn);

	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
		 asoc->adv_peer_ack_point);

	return sctp_outq_is_empty(q);
}
/* Is the outqueue empty?
 * The queue is empty when we have no pending data, no in-flight data
 * and no pending retransmissions.
 */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
	       list_empty(&q->retransmit);
}
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;
	bool forward_progress = false;

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);
	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (transmitted_queue != &q->retransmit &&
			    !tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    !sctp_chunk_retransmitted(tchunk) &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
					*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
				forward_progress = true;
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;
				forward_progress = true;

				if (!tchunk->tsn_gap_acked) {
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				list_add_tail(lchunk, &tlist);
			}
		} else {
			if (tchunk->tsn_gap_acked) {
				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
					 __func__, tsn);

				tchunk->tsn_gap_acked = 0;

				if (tchunk->transport)
					bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);
		}
	}
	if (transport) {
		if (bytes_acked) {
			struct sctp_association *asoc = transport->asoc;

			/* We may have counted DATA that was migrated
			 * to this transport due to DEL-IP operation.
			 * Subtract those bytes, since they were never
			 * sent on this transport and shouldn't be
			 * credited to this transport.
			 */
			bytes_acked -= migrate_bytes;

			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;
			forward_progress = true;

			/*
			 * While in SHUTDOWN PENDING, we may have started
			 * the T5 shutdown guard timer after reaching the
			 * retransmission limit. Stop that timer as soon
			 * as the receiver acknowledged any data.
			 */
			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
			    del_timer(&asoc->timers
				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
					sctp_association_put(asoc);

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if ((transport->state == SCTP_INACTIVE ||
			     transport->state == SCTP_UNCONFIRMED) &&
			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			if (transport->flight_size == 0)
				transport->partial_bytes_acked = 0;
			q->outstanding_bytes -= bytes_acked + migrate_bytes;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 *
			 * Allow the association to timeout while in SHUTDOWN
			 * PENDING or SHUTDOWN RECEIVED in case the receiver
			 * stays in zero window mode forever.
			 */
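			/* Note on the probe test below: with exactly one
			 * chunk in flight, the peer has acked everything up
			 * to TSN next_tsn - 2, so sack_ctsn + 2 == next_tsn
			 * identifies the single outstanding probe chunk.
			 */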
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn+2 == q->asoc->next_tsn) &&
			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
				pr_debug("%s: sack received for zero window "
					 "probe:%u\n", __func__, sack_ctsn);

				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (del_timer(&transport->T3_rtx_timer))
				sctp_transport_put(transport);
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}

		if (forward_progress) {
			if (transport->dst)
				sctp_transport_dst_confirm(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}
/* Mark chunks as missing; consequently they may get retransmitted. */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *primary = asoc->peer.primary_path;

	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary,
						chunk->transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				pr_debug("%s: tsn:0x%x missing counter:%d\n",
					 __func__, tsn, chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report'
		 * value larger than or equal to 3, mark that chunk for
		 * retransmission and start the fast retransmit procedure.
		 */

		if (chunk->tsn_missing_report >= 3) {
			chunk->fast_retransmit = SCTP_NEED_FRTX;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__, transport,
			 transport->cwnd, transport->ssthresh,
			 transport->flight_size, transport->partial_bytes_acked);
	}
}
/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	__u32 ctsn = ntohl(sack->cum_tsn_ack);
	union sctp_sack_variable *frags;
	__u16 tsn_offset, blocks;
	int i;

	if (TSN_lte(tsn, ctsn))
		goto pass;
	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks. They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field. All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */
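	/* For example, with ctsn == 1000 a gap ack block of
	 * {start=2, end=4} covers TSNs 1002 through 1004, so the loop
	 * below checks whether tsn - ctsn falls inside [start, end] for
	 * any block.
	 */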
	frags = sack->variable;
	blocks = ntohs(sack->num_gap_ack_blocks);
	tsn_offset = tsn - ctsn;
	for (i = 0; i < blocks; ++i) {
		if (tsn_offset >= ntohs(frags[i].gab.start) &&
		    tsn_offset <= ntohs(frags[i].gab.end))
			goto pass;
	}

	return 0;
pass:
	return 1;
}
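
/* Return the index of 'stream' in the skip list, or nskips when the
 * stream has no entry yet (the caller then appends at that slot).
 */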
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __be16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	if (!asoc->peer.prsctp_capable)
		return;
	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing           local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                107                           107
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}
	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}