/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement sctp stream message interleaving, mostly
 * including I-DATA and I-FORWARD-TSN chunk processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>

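/* Build an empty I-DATA fragment (RFC 8260) for @len bytes of payload.
 * Only the stream id is filled in here; PPID, MID and FSN are assigned
 * later by sctp_chunk_assign_mid() once the whole message is fragmented.
 */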
static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}

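/* Assign one MID to all fragments of a message and consecutive FSNs to
 * the non-first fragments.  Only the last fragment advances the stream's
 * next outbound MID; the others merely peek at it, so every fragment of
 * the message carries the same MID.
 */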
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}

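/* Validators run before a DATA/I-DATA chunk is accepted: the chunk type
 * must match the negotiated format, and an ordered chunk must not carry
 * an SSN/MID that the receive side has already moved past.
 */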
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}

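/* Insert @event into the reassembly queue, sorted by stream id, then MID,
 * then FSN, with a first fragment sorting before the other fragments of
 * its message.  The common case of appending at the tail avoids a full
 * queue walk.
 */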
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid))) {
			loc = pos;
			break;
		}
		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
}

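/* A message on this stream is being partially delivered: pull the next
 * run of in-sequence fragments, starting at sin->fsn, off the reassembly
 * queue so it can be handed to the user.
 */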
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			break;
		}

		if (is_last)
			break;
	}

	if (!first_frag)
		goto out;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

out:
	return retval;
}

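/* Try to complete the message @event belongs to.  If a whole first..last
 * fragment run is queued, deliver it with MSG_EOR; otherwise start partial
 * delivery once the buffered prefix reaches the socket's pd_point.
 */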
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

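/* Reassembly entry point for ordered I-DATA: unfragmented messages pass
 * through with MSG_EOR set, fragments are queued and then reassembly or
 * partial delivery is attempted.
 */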
static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}

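/* Park a reassembled but out-of-order message in the lobby, sorted by
 * stream id and then MID, until its stream's next expected MID catches up.
 */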
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream) {
			loc = pos;
			break;
		}
		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
}

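/* After delivering a message on @sid, move any lobby events that have now
 * become in-order onto the same event list, advancing the expected MID as
 * we go.
 */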
static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}

static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}

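/* Queue an event list on the socket receive queue and wake up the reader.
 * Events are dropped when the socket is shut down for reading or the user
 * has not subscribed to the event type.  Returns 1 if the events were
 * queued, 0 if they were freed.
 */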
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

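/* Unordered counterpart of sctp_intl_store_reasm(): keep the reasm_uo
 * queue sorted by stream id, MID and FSN.
 */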
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

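/* Unordered counterpart of sctp_intl_retrieve_partial(): continue partial
 * delivery of the unordered message tracked by sin->mid_uo/sin->fsn_uo.
 */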
static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			break;
		}

		if (is_last)
			break;
	}

	if (!first_frag)
		goto out;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

out:
	return retval;
}

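/* Unordered counterpart of sctp_intl_retrieve_reassembled(): at most one
 * unordered message per stream can sit in partial delivery, tracked by
 * sin->mid_uo.
 */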
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}

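/* When partial delivery must be forced (e.g. under memory pressure), find
 * the first deliverable unordered fragment run and put its stream into
 * unordered partial-delivery mode.
 */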
static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		}
	}

	if (!first_frag)
		goto out;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;

out:
	return NULL;
}

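/* Receive path for one I-DATA chunk: build an event, take MID and PPID or
 * FSN from the I-DATA header, then run reassembly and, for ordered data,
 * MID ordering before delivery.  Returns 1 if a complete message reached
 * the receive queue, 0 otherwise, -ENOMEM on allocation failure.
 */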
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			if (event->msg_flags & MSG_EOR)
				event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		}
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, &temp);
	}

	return event_eor;
}

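/* Ordered counterpart of sctp_intl_retrieve_first_uo(): only a message
 * carrying the stream's next expected MID may enter partial delivery.
 */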
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		}
	}

	if (!first_frag)
		goto out;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;

out:
	return NULL;
}

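/* Push as much as possible out of both reassembly queues via partial
 * delivery, typically after reneging has freed receive buffer space.
 */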
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}
}

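/* Renege buffered events (lobby first, then both reassembly queues) to
 * make room for @chunk, then fall back to partial delivery if the chunk
 * still does not complete a message.
 */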
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}

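/* Send a SCTP_PARTIAL_DELIVERY_ABORTED notification for @sid straight to
 * the receive queue, if the user subscribed to partial delivery events.
 */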
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
					SCTP_PARTIAL_DELIVERY_EVENT))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		struct sctp_sock *sp = sctp_sk(sk);

		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sp->data_ready_signalled) {
			sp->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

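/* After MIDs have been skipped on @sid, deliver the lobby events that are
 * now below the expected MID, plus any in-order run right behind them.
 */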
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, &temp);
	}
}

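/* Abort partial delivery on every stream, unordered and ordered, then
 * flush whatever is left; used when all pending data must be discarded.
 */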
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = SCTP_SI(stream, sid);
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

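/* Return the slot in @skiplist that already holds @stream/@flags, or
 * @nskips so the caller appends a new entry.
 */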
static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

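/* Build an I-FORWARD-TSN chunk that advances adv_peer_ack_point past
 * abandoned chunks, collecting up to ten (sid, flags, mid) skip entries,
 * one per stream/U-bit pair; the U bit marks unordered messages.
 */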
#define SCTP_FTSN_U_BIT	0x1
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}

#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))

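/* FORWARD-TSN/I-FORWARD-TSN validation: the chunk type must match the
 * negotiated format and every skipped stream id must be below incnt.
 */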
static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}

static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* abort only when it's for all data */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

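/* Handle one I-FORWARD-TSN skip entry: abort any partial delivery that it
 * overtakes, advance the stream's expected MID, and reap lobby events that
 * became deliverable.
 */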
static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
	struct sctp_stream *stream = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_ulpq_tail_event(ulpq, &temp);
}

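/* Method tables: sctp_stream_interleave_0 preserves the original
 * DATA/FORWARD-TSN behaviour, sctp_stream_interleave_1 switches every
 * operation to its I-DATA/I-FORWARD-TSN counterpart.
 */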
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len = sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag = sctp_make_datafrag_empty,
	.assign_number = sctp_chunk_assign_ssn,
	.validate_data = sctp_validate_data,
	.ulpevent_data = sctp_ulpq_tail_data,
	.enqueue_event = do_ulpq_tail_event,
	.renege_events = sctp_ulpq_renege,
	.start_pd = sctp_ulpq_partial_delivery,
	.abort_pd = sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn = sctp_generate_fwdtsn,
	.validate_ftsn = sctp_validate_fwdtsn,
	.report_ftsn = sctp_report_fwdtsn,
	.handle_ftsn = sctp_handle_fwdtsn,
};

static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
				 struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_enqueue_event(ulpq, &temp);
}

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len = sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag = sctp_make_idatafrag_empty,
	.assign_number = sctp_chunk_assign_mid,
	.validate_data = sctp_validate_idata,
	.ulpevent_data = sctp_ulpevent_idata,
	.enqueue_event = do_sctp_enqueue_event,
	.renege_events = sctp_renege_events,
	.start_pd = sctp_intl_start_pd,
	.abort_pd = sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn = sctp_generate_iftsn,
	.validate_ftsn = sctp_validate_iftsn,
	.report_ftsn = sctp_report_iftsn,
	.handle_ftsn = sctp_handle_iftsn,
};

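/* Choose the method table for this association; intl_enable is settled
 * during association setup, so the choice never changes afterwards.
 */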
void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
				       : &sctp_stream_interleave_0;
}